Merge release-4-6 into master
author Roland Schulz <roland@utk.edu>
Tue, 15 Jan 2013 21:00:15 +0000 (16:00 -0500)
committer Roland Schulz <roland@utk.edu>
Tue, 15 Jan 2013 22:50:22 +0000 (17:50 -0500)
On top of resolving conflicts:
- Removed any newly added GMX.*EXPORT
  (no visibility yet in master)
- Adapted gmxTestCXX11.cmake to the new approach of setting flags
  without caching
- Removed deleted g_dih from symlinking and legacymodules.cpp
- Fix gmx_cpuid.c (also part of I0ad9ca77b)

Changes not applied
- Template building (c22f43f2932c0332c939cfc44)
- Removed libmd renamed (3f231be160c46)

Conflicts:
CMakeLists.txt (mostly easy - moved comment about
    Windows and BUILD_SHARED_LIBS, and changes related to
            gmxTestCXX11)
cmake/gmxCFlags.cmake
include/nbsearch.h (only Export ignored)
include/trajana.h (only Export ignored)
share/template/CMakeLists.txt (changes not applied)
src/gmxlib/CMakeLists.txt (changes not applied)
src/gmxlib/cuda_tools/copyrite_gpu.cu (moved to src/gromacs)
src/gmxlib/gpu_utils/CMakeLists.txt (only exports - ignored)
src/gromacs/legacyheaders/edsam.h
src/gromacs/legacyheaders/gmx_ana.h
src/gromacs/legacyheaders/gmx_cpuid.h
src/gromacs/libgromacs.pc.cmakein
src/gromacs/mdlib/edsam.c
src/gromacs/mdlib/forcerec.c
src/gromacs/selection/compiler.cpp
src/kernel/CMakeLists.txt
src/mdlib/CMakeLists.txt (ignored Export&rename)
src/mdlib/libmd.pc.cmakein (only rename - ignored)
src/mdlib/nbnxn_cuda/CMakeLists.txt (only Export ignored)
src/tools/CMakeLists.txt
src/tools/gmx_dih.c
src/tools/gmx_sans.c

Change-Id: I28541d3c871feb7261c685793b36045e1806014d

202 files changed:
1  2 
CMakeLists.txt
admin/programs.txt
cmake/FindFFTW.cmake
cmake/Platform/BlueGeneL-static-XL-C.cmake
cmake/Platform/BlueGeneP-static-XL-C.cmake
cmake/gmxCFlags.cmake
cmake/gmxManageGPU.cmake
cmake/gmxManageMPI.cmake
cmake/gmxTestCXX11.cmake
scripts/GMXRC.cmakein
share/html/online.html
src/config.h.cmakein
src/gromacs/CMakeLists.txt
src/gromacs/gmxlib/atomprop.c
src/gromacs/gmxlib/checkpoint.c
src/gromacs/gmxlib/copyrite.c
src/gromacs/gmxlib/cuda_tools/copyrite_gpu.cu
src/gromacs/gmxlib/filenm.c
src/gromacs/gmxlib/gmx_cpuid.c
src/gromacs/gmxlib/gmx_detect_hardware.c
src/gromacs/gmxlib/gmx_omp_nthreads.c
src/gromacs/gmxlib/ifunc.c
src/gromacs/gmxlib/mshift.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_128_fma_double/kernelutil_x86_avx_128_fma_double.h
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_128_fma_single/kernelutil_x86_avx_128_fma_single.h
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/kernelutil_x86_avx_256_double.h
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecCSTab_VdwCSTab_GeomP1P1_avx_256_double.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecCSTab_VdwCSTab_GeomW3P1_avx_256_double.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecCSTab_VdwCSTab_GeomW3W3_avx_256_double.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecCSTab_VdwCSTab_GeomW4P1_avx_256_double.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecCSTab_VdwCSTab_GeomW4W4_avx_256_double.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecCSTab_VdwLJ_GeomP1P1_avx_256_double.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecCSTab_VdwLJ_GeomW3P1_avx_256_double.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecCSTab_VdwLJ_GeomW3W3_avx_256_double.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecCSTab_VdwLJ_GeomW4P1_avx_256_double.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecCSTab_VdwLJ_GeomW4W4_avx_256_double.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecCSTab_VdwNone_GeomP1P1_avx_256_double.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecCSTab_VdwNone_GeomW3P1_avx_256_double.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecCSTab_VdwNone_GeomW3W3_avx_256_double.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecCSTab_VdwNone_GeomW4P1_avx_256_double.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecCSTab_VdwNone_GeomW4W4_avx_256_double.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecCoul_VdwCSTab_GeomP1P1_avx_256_double.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecCoul_VdwCSTab_GeomW3P1_avx_256_double.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecCoul_VdwCSTab_GeomW3W3_avx_256_double.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecCoul_VdwCSTab_GeomW4P1_avx_256_double.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecCoul_VdwCSTab_GeomW4W4_avx_256_double.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecCoul_VdwLJ_GeomP1P1_avx_256_double.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecCoul_VdwLJ_GeomW3P1_avx_256_double.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecCoul_VdwLJ_GeomW3W3_avx_256_double.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecCoul_VdwLJ_GeomW4P1_avx_256_double.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecCoul_VdwLJ_GeomW4W4_avx_256_double.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecCoul_VdwNone_GeomP1P1_avx_256_double.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecCoul_VdwNone_GeomW3P1_avx_256_double.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecCoul_VdwNone_GeomW3W3_avx_256_double.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecCoul_VdwNone_GeomW4P1_avx_256_double.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecCoul_VdwNone_GeomW4W4_avx_256_double.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecEwSh_VdwLJSh_GeomP1P1_avx_256_double.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecEwSh_VdwLJSh_GeomW3P1_avx_256_double.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecEwSh_VdwLJSh_GeomW3W3_avx_256_double.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecEwSh_VdwLJSh_GeomW4P1_avx_256_double.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecEwSh_VdwLJSh_GeomW4W4_avx_256_double.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecEwSh_VdwNone_GeomP1P1_avx_256_double.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecEwSh_VdwNone_GeomW3P1_avx_256_double.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecEwSh_VdwNone_GeomW3W3_avx_256_double.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecEwSh_VdwNone_GeomW4P1_avx_256_double.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecEwSh_VdwNone_GeomW4W4_avx_256_double.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecEwSw_VdwLJSw_GeomP1P1_avx_256_double.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecEwSw_VdwLJSw_GeomW3P1_avx_256_double.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecEwSw_VdwLJSw_GeomW3W3_avx_256_double.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecEwSw_VdwLJSw_GeomW4P1_avx_256_double.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecEwSw_VdwLJSw_GeomW4W4_avx_256_double.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecEwSw_VdwNone_GeomP1P1_avx_256_double.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecEwSw_VdwNone_GeomW3P1_avx_256_double.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecEwSw_VdwNone_GeomW3W3_avx_256_double.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecEwSw_VdwNone_GeomW4P1_avx_256_double.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecEwSw_VdwNone_GeomW4W4_avx_256_double.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecEw_VdwCSTab_GeomP1P1_avx_256_double.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecEw_VdwCSTab_GeomW3P1_avx_256_double.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecEw_VdwCSTab_GeomW3W3_avx_256_double.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecEw_VdwCSTab_GeomW4P1_avx_256_double.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecEw_VdwCSTab_GeomW4W4_avx_256_double.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecEw_VdwLJ_GeomP1P1_avx_256_double.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecEw_VdwLJ_GeomW3P1_avx_256_double.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecEw_VdwLJ_GeomW3W3_avx_256_double.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecEw_VdwLJ_GeomW4P1_avx_256_double.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecEw_VdwLJ_GeomW4W4_avx_256_double.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecEw_VdwNone_GeomP1P1_avx_256_double.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecEw_VdwNone_GeomW3P1_avx_256_double.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecEw_VdwNone_GeomW3W3_avx_256_double.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecEw_VdwNone_GeomW4P1_avx_256_double.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecEw_VdwNone_GeomW4W4_avx_256_double.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecGB_VdwCSTab_GeomP1P1_avx_256_double.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecGB_VdwLJ_GeomP1P1_avx_256_double.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecGB_VdwNone_GeomP1P1_avx_256_double.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecNone_VdwCSTab_GeomP1P1_avx_256_double.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecNone_VdwLJSh_GeomP1P1_avx_256_double.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecNone_VdwLJSw_GeomP1P1_avx_256_double.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecNone_VdwLJ_GeomP1P1_avx_256_double.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecRFCut_VdwCSTab_GeomP1P1_avx_256_double.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecRFCut_VdwCSTab_GeomW3P1_avx_256_double.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecRFCut_VdwCSTab_GeomW3W3_avx_256_double.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecRFCut_VdwCSTab_GeomW4P1_avx_256_double.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecRFCut_VdwCSTab_GeomW4W4_avx_256_double.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecRFCut_VdwLJSh_GeomP1P1_avx_256_double.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecRFCut_VdwLJSh_GeomW3P1_avx_256_double.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecRFCut_VdwLJSh_GeomW3W3_avx_256_double.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecRFCut_VdwLJSh_GeomW4P1_avx_256_double.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecRFCut_VdwLJSh_GeomW4W4_avx_256_double.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecRFCut_VdwLJSw_GeomP1P1_avx_256_double.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecRFCut_VdwLJSw_GeomW3P1_avx_256_double.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecRFCut_VdwLJSw_GeomW3W3_avx_256_double.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecRFCut_VdwLJSw_GeomW4P1_avx_256_double.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecRFCut_VdwLJSw_GeomW4W4_avx_256_double.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecRFCut_VdwNone_GeomP1P1_avx_256_double.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecRFCut_VdwNone_GeomW3P1_avx_256_double.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecRFCut_VdwNone_GeomW3W3_avx_256_double.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecRFCut_VdwNone_GeomW4P1_avx_256_double.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecRFCut_VdwNone_GeomW4W4_avx_256_double.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecRF_VdwCSTab_GeomP1P1_avx_256_double.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecRF_VdwCSTab_GeomW3P1_avx_256_double.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecRF_VdwCSTab_GeomW3W3_avx_256_double.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecRF_VdwCSTab_GeomW4P1_avx_256_double.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecRF_VdwCSTab_GeomW4W4_avx_256_double.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecRF_VdwLJ_GeomP1P1_avx_256_double.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecRF_VdwLJ_GeomW3P1_avx_256_double.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecRF_VdwLJ_GeomW3W3_avx_256_double.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecRF_VdwLJ_GeomW4P1_avx_256_double.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecRF_VdwLJ_GeomW4W4_avx_256_double.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecRF_VdwNone_GeomP1P1_avx_256_double.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecRF_VdwNone_GeomW3P1_avx_256_double.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecRF_VdwNone_GeomW3W3_avx_256_double.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecRF_VdwNone_GeomW4P1_avx_256_double.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecRF_VdwNone_GeomW4W4_avx_256_double.c
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_template_avx_256_double.pre
src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_single/kernelutil_x86_avx_256_single.h
src/gromacs/gmxlib/nonbonded/nb_kernel_sse2_double/kernelutil_x86_sse2_double.h
src/gromacs/gmxlib/nonbonded/nb_kernel_sse2_single/kernelutil_x86_sse2_single.h
src/gromacs/gmxlib/nonbonded/nb_kernel_sse4_1_double/kernelutil_x86_sse4_1_double.h
src/gromacs/gmxlib/nonbonded/nb_kernel_sse4_1_single/kernelutil_x86_sse4_1_single.h
src/gromacs/gmxlib/sfactor.c
src/gromacs/gmxlib/thread_mpi/collective.c
src/gromacs/gmxlib/thread_mpi/once.c
src/gromacs/gmxlib/thread_mpi/tmpi_init.c
src/gromacs/gmxpreprocess/readir.c
src/gromacs/legacyheaders/edsam.h
src/gromacs/legacyheaders/gmx_ana.h
src/gromacs/legacyheaders/gmx_cpuid.h
src/gromacs/legacyheaders/gmx_math_x86_avx_128_fma_double.h
src/gromacs/legacyheaders/gmx_simd_macros.h
src/gromacs/legacyheaders/gmx_x86_avx_128_fma.h
src/gromacs/legacyheaders/gmx_x86_avx_256.h
src/gromacs/legacyheaders/main.h
src/gromacs/legacyheaders/mshift.h
src/gromacs/legacyheaders/thread_mpi/atomic/gcc_x86.h
src/gromacs/legacyheaders/types/filenm.h
src/gromacs/legacyheaders/types/graph.h
src/gromacs/legacyheaders/types/nb_verlet.h
src/gromacs/legacyheaders/types/nbnxn_pairlist.h
src/gromacs/legacyheaders/types/state.h
src/gromacs/legacyheaders/vec.h
src/gromacs/libgromacs.pc.cmakein
src/gromacs/mdlib/constr.c
src/gromacs/mdlib/coupling.c
src/gromacs/mdlib/domdec.c
src/gromacs/mdlib/edsam.c
src/gromacs/mdlib/fft5d.h
src/gromacs/mdlib/forcerec.c
src/gromacs/mdlib/gmx_wallcycle.c
src/gromacs/mdlib/init.c
src/gromacs/mdlib/nbnxn_atomdata.c
src/gromacs/mdlib/nbnxn_consts.h
src/gromacs/mdlib/nbnxn_cuda/nbnxn_cuda_kernel.cuh
src/gromacs/mdlib/nbnxn_cuda/nbnxn_cuda_kernel_legacy.cuh
src/gromacs/mdlib/nbnxn_cuda/nbnxn_cuda_kernel_utils.cuh
src/gromacs/mdlib/nbnxn_internal.h
src/gromacs/mdlib/nbnxn_kernels/nbnxn_kernel_gpu_ref.c
src/gromacs/mdlib/nbnxn_kernels/nbnxn_kernel_ref_outer.h
src/gromacs/mdlib/nbnxn_kernels/nbnxn_kernel_simd_2xnn_inner.h
src/gromacs/mdlib/nbnxn_kernels/nbnxn_kernel_simd_2xnn_outer.h
src/gromacs/mdlib/nbnxn_kernels/nbnxn_kernel_simd_4xn_inner.h
src/gromacs/mdlib/nbnxn_kernels/nbnxn_kernel_simd_4xn_outer.h
src/gromacs/mdlib/nbnxn_kernels/nbnxn_kernel_simd_utils.h
src/gromacs/mdlib/nbnxn_search.c
src/gromacs/mdlib/sim_util.c
src/gromacs/mdlib/tables.c
src/programs/gmx/CreateLinks.cmake.cmakein
src/programs/gmx/legacymodules.cpp
src/programs/gmxcheck/gmxcheck.c
src/programs/grompp/convparm.c
src/programs/mdrun/md.c
src/programs/mdrun/mdrun.c
src/programs/mdrun/repl_ex.c
src/programs/mdrun/runner.c
src/programs/pdb2gmx/hizzie.c
src/programs/pdb2gmx/pdb2gmx.c
src/tools/CMakeLists.txt
src/tools/gmx_angle.c
src/tools/gmx_genpr.c
src/tools/gmx_helix.c
src/tools/gmx_make_edi.c
src/tools/gmx_rmsf.c
src/tools/gmx_tune_pme.c

diff --cc CMakeLists.txt
index ae3d23362f1309761ac092ded1cc76a34cf556bd,c33fdce9cb60a0bd6407bfbf724a0679173247e3..c81554dda3d8c8b79af41ff4acaea356381bd1d0
@@@ -2,12 -36,15 +2,11 @@@ cmake_minimum_required(VERSION 2.8
  # Keep CMake suitably quiet on Cygwin
  set(CMAKE_LEGACY_CYGWIN_WIN32 0) # Remove when CMake >= 2.8.4 is required
  
- # override bugs on OS X where Cmake picks gcc (GNU) for C instead of system default cc (Clang).
- if(APPLE)
-     set(CMAKE_C_COMPILER_INIT "cc")
- endif(APPLE)
 -# Allows CPack to act differently for normal tools and mdrun (e.g. because of MPI)
 -set(CPACK_COMPONENT_GROUP_TOOLS_DESCRIPTION "All GROMACS executable tools")
 -set(CPACK_COMPONENT_GROUP_MDRUN_DESCRIPTION "GROMACS executable for running simulations")
 -
+ # CMake modules/macros are in a subdirectory to keep this file cleaner
+ # This needs to be set before project() in order to pick up toolchain files
+ list(APPEND CMAKE_MODULE_PATH ${CMAKE_CURRENT_SOURCE_DIR}/cmake ${CMAKE_CURRENT_SOURCE_DIR}/cmake/Platform)
  
 -project(Gromacs C)
 +project(Gromacs)
  include(Dart)
  mark_as_advanced(DART_ROOT)
  
@@@ -52,16 -85,10 +51,13 @@@ endif(
  # provide backward compatibility of software written against the Gromacs API.
  set(API_VERSION ${NUM_VERSION})
  
- # Cmake modules/macros are in a subdirectory to keep this file cleaner
- set(CMAKE_MODULE_PATH ${CMAKE_CURRENT_SOURCE_DIR}/cmake)
  if(CMAKE_INSTALL_PREFIX_INITIALIZED_TO_DEFAULT AND UNIX)
 -set(CMAKE_INSTALL_PREFIX "/usr/local/gromacs" CACHE STRING "Installation prefix (installation will need write permissions here)" FORCE)
 +    set(CMAKE_INSTALL_PREFIX "/usr/local/gromacs" CACHE STRING "Installation prefix (installation will need write permissions here)" FORCE)
  endif()
  
 +set(GMX_INSTALL_PREFIX "" CACHE STRING "Prefix gets appended to CMAKE_INSTALL_PREFIX. For cpack it sets the root folder of the archive.")
 +mark_as_advanced(GMX_INSTALL_PREFIX)
 +
  include(gmxBuildTypeReference)
  
  if(NOT CMAKE_BUILD_TYPE)
@@@ -109,47 -136,28 +105,10 @@@ if(CMAKE_HOST_UNIX
  endif()
  
  ########################################################################
- set(CMAKE_PREFIX_PATH "" CACHE STRING "Extra locations to search for external libraries and tools (give directory without lib, bin, or include)")
- # Fix stupid flags on Windows
 -# User input options - enable C++ - before any CXX flags are changed   #
--########################################################################
- SET(SHARED_LIBS_DEFAULT ON) 
- IF( WIN32 AND NOT CYGWIN)
-   option(GMX_PREFER_STATIC_LIBS "When finding libraries prefer static system libraries (MT instead of MD)!" ON)
-   mark_as_advanced(GMX_PREFER_STATIC_LIBS)
-   SET(SHARED_LIBS_DEFAULT OFF)  #is currently not working on Windows
-   # This makes windows.h not declare min/max as macros that would break
-   # C++ code using std::min/std::max.
-   add_definitions(-DNOMINMAX)
--
-   IF (GMX_PREFER_STATIC_LIBS)
-     #Only setting Debug and Release flags. Others configurations current not used.
-     STRING(REPLACE /MD /MT CMAKE_C_FLAGS_RELEASE ${CMAKE_C_FLAGS_RELEASE})
-     SET(CMAKE_C_FLAGS_RELEASE ${CMAKE_C_FLAGS_RELEASE} CACHE STRING "" FORCE)
-     STRING(REPLACE /MD /MT CMAKE_C_FLAGS_DEBUG ${CMAKE_C_FLAGS_DEBUG})
-     SET(CMAKE_C_FLAGS_DEBUG ${CMAKE_C_FLAGS_DEBUG} CACHE STRING "" FORCE)
-     STRING(REPLACE /MD /MT CMAKE_CXX_FLAGS_RELEASE ${CMAKE_CXX_FLAGS_RELEASE})
-     SET(CMAKE_CXX_FLAGS_RELEASE ${CMAKE_CXX_FLAGS_RELEASE} CACHE STRING "" FORCE)
-     STRING(REPLACE /MD /MT CMAKE_CXX_FLAGS_DEBUG ${CMAKE_CXX_FLAGS_DEBUG})
-     SET(CMAKE_CXX_FLAGS_DEBUG ${CMAKE_CXX_FLAGS_DEBUG} CACHE STRING "" FORCE)
-   ENDIF()
 -# decide on GPU settings based on user-settings and GPU/CUDA detection
 -include(gmxManageGPU)
--
-   #Workaround for cmake bug 13174. Replace deprecated options.
-   IF( CMAKE_C_COMPILER_ID MATCHES "Intel" )
-     STRING(REPLACE /GZ /RTC1 CMAKE_C_FLAGS_DEBUG ${CMAKE_C_FLAGS_DEBUG})
-     SET(CMAKE_C_FLAGS_DEBUG ${CMAKE_C_FLAGS_DEBUG} CACHE STRING "" FORCE)
-   ENDIF()
-   IF( CMAKE_CXX_COMPILER_ID MATCHES "Intel" )
-     STRING(REPLACE /GZ /RTC1 CMAKE_CXX_FLAGS_DEBUG ${CMAKE_CXX_FLAGS_DEBUG})
-     STRING(REPLACE /GX /EHsc CMAKE_CXX_FLAGS_DEBUG ${CMAKE_CXX_FLAGS_DEBUG})
-     SET(CMAKE_CXX_FLAGS_DEBUG ${CMAKE_CXX_FLAGS_DEBUG} CACHE STRING "" FORCE)
 -# TODO: move OpenMM to contrib
 -option(GMX_OPENMM "Accelerated execution on GPUs through the OpenMM library (not fully supported)" OFF)
 -mark_as_advanced(GMX_OPENMM)
--
-     STRING(REPLACE /GX /EHsc CMAKE_CXX_FLAGS_RELEASE ${CMAKE_CXX_FLAGS_RELEASE})
-     SET(CMAKE_CXX_FLAGS_RELEASE ${CMAKE_CXX_FLAGS_RELEASE} CACHE STRING "" FORCE)
-   ENDIF()
- ENDIF()
 -option(GMX_FORCE_CXX "Enable C++ compilation even if not necessary" OFF)
 -mark_as_advanced(GMX_FORCE_CXX)
--
+ option(GMX_COOL_QUOTES "Enable Gromacs cool quotes" ON)
+ mark_as_advanced(GMX_COOL_QUOTES)
  
- ########################################################################
 -if(GMX_GPU OR GMX_OPENMM OR GMX_FORCE_CXX)
 -    enable_language(CXX)
 -endif()
+ set(CMAKE_PREFIX_PATH "" CACHE STRING "Extra locations to search for external libraries and tools (give directory without lib, bin, or include)")
 -
 -########################################################################
  # User input options                                                   #
  ########################################################################
  option(GMX_DOUBLE "Use double precision (much slower, use only if you really need it)" OFF)
@@@ -227,8 -229,21 +191,19 @@@ mark_as_advanced(GMX_SKIP_DEFAULT_CFLAG
  # These files should be removed from the source tree when a CMake version that
  # includes the features in question becomes required for building GROMACS.
  include(CheckCCompilerFlag)
 -if(CMAKE_CXX_COMPILER_LOADED)
 -    include(CheckCXXCompilerFlag)
 -endif()
 +include(CheckCXXCompilerFlag)
  
+ # First exclude compilers known to not work with OpenMP although claim to support it:
+ # gcc 4.2.1 and gcc-llvm 4.2.1 (also claims to be 4.2.1) on Mac OS X
+ # This fixes redmine 900 and needs to run before OpenMP flags are set below.
+ message("CMAKE_COMPILER_IS_GNUCC: ${CMAKE_COMPILER_IS_GNUCC}")
+ if (CMAKE_SYSTEM_NAME STREQUAL "Darwin" AND
+     CMAKE_COMPILER_IS_GNUCC AND C_COMPILER_VERSION VERSION_LESS 4.3)
+     message(STATUS "OpenMP multithreading not supported with gcc/llvm-gcc 4.2 on Mac OS X, disabled")
+     set(GMX_OPENMP OFF CACHE BOOL
+         "OpenMP multithreading not not supported with gcc/llvm-gcc 4.2 on Mac OS X, disabled!" FORCE)
+ endif()
  # OpenMP check must come before other CFLAGS!
  if(GMX_OPENMP)
      find_package(OpenMP)
@@@ -450,34 -541,45 +459,61 @@@ include(gmxManageMPI
  ########################################################################
  # Find external packages                                               #
  ########################################################################
- if(UNIX)
-     if(GMX_PREFER_STATIC_LIBS)
-         # On Linux .a is the static library suffix, on Mac OS X .lib can also
-         # be used, so we'll add both to the preference list.
-         SET(CMAKE_FIND_LIBRARY_SUFFIXES ".lib;.a" ${CMAKE_FIND_LIBRARY_SUFFIXES})
-         if(SHARED_LIBS_DEFAULT)
-             if(BUILD_SHARED_LIBS) #Warn the user about the combination. But don't overwrite the request.
-                 message(WARNING "Static libraries requested, and shared Gromacs libraries requested.")
-             elseif(NOT DEFINED BUILD_SHARED_LIBS) #Change default to OFF. Don't warn if it's already off.
-                 message(WARNING "Static libraries requested, the GROMACS libraries will also be build static (BUILD_SHARED_LIBS=OFF)")
-                 set(SHARED_LIBS_DEFAULT OFF)
-             endif()
-         endif()
-     endif()
+ if(UNIX AND GMX_PREFER_STATIC_LIBS)
+     # On Linux .a is the static library suffix, on Mac OS X .lib can also
+     # be used, so we'll add both to the preference list.
+     SET(CMAKE_FIND_LIBRARY_SUFFIXES ".lib;.a" ${CMAKE_FIND_LIBRARY_SUFFIXES})
  endif()
- option(BUILD_SHARED_LIBS "Enable shared libraries (can be problematic with MPI, Windows)" ${SHARED_LIBS_DEFAULT})
+ IF( WIN32 AND NOT CYGWIN)
++  # This makes windows.h not declare min/max as macros that would break
++  # C++ code using std::min/std::max.
++  add_definitions(-DNOMINMAX)
++
+   if (NOT BUILD_SHARED_LIBS)
+       option(GMX_PREFER_STATIC_LIBS "When finding libraries prefer static system libraries (MT instead of MD)!" ON)
+       if(NOT GMX_PREFER_STATIC_LIBS)
+           message(WARNING "Shared system libraries requested, and static Gromacs libraries requested.")
+       endif()
+   else()
++      message(FATAL_ERROR "BUILD_SHARED_LIBS not yet working for Windows in the master branch")
+       option(GMX_PREFER_STATIC_LIBS "When finding libraries prefer static system libraries (MT instead of MD)!" OFF)
+       if(GMX_PREFER_STATIC_LIBS)
+           #this combination segfaults (illigal passing of file handles)
+           message(FATAL_ERROR "Static system libraries requested, and shared Gromacs libraries requested.")
+       endif()
+       add_definitions(-DUSE_VISIBILITY -DTMPI_USE_VISIBILITY)
+       set(PKG_CFLAGS "$PKG_CFLAGS -DUSE_VISIBILITY -DTMPI_USE_VISIBILITY")
+   endif()
+   mark_as_advanced(GMX_PREFER_STATIC_LIBS)
+   IF (GMX_PREFER_STATIC_LIBS)
+       #Only setting Debug and Release flags. Others configurations are current not used.
+       STRING(REPLACE /MD /MT CMAKE_C_FLAGS_RELEASE ${CMAKE_C_FLAGS_RELEASE})
+       STRING(REPLACE /MD /MT CMAKE_C_FLAGS_DEBUG ${CMAKE_C_FLAGS_DEBUG})
+       if(CMAKE_CXX_COMPILER_LOADED)
+           STRING(REPLACE /MD /MT CMAKE_CXX_FLAGS_RELEASE ${CMAKE_CXX_FLAGS_RELEASE})
+           STRING(REPLACE /MD /MT CMAKE_CXX_FLAGS_DEBUG ${CMAKE_CXX_FLAGS_DEBUG})
+       endif()
+   ENDIF()
+   IF( CMAKE_C_COMPILER_ID MATCHES "Intel" )
+     if(BUILD_SHARED_LIBS) #not sure why incremental building with shared libs doesn't work
+         STRING(REPLACE "/INCREMENTAL:YES" "" CMAKE_SHARED_LINKER_FLAGS ${CMAKE_SHARED_LINKER_FLAGS})
+     endif()
+   ENDIF()
+ ENDIF()
  
 +option(GMX_XML "Use libxml2 to parse xml files" ON)
 +if (GMX_XML)
 +  find_package(LibXml2)
 +  set(PKG_XML "")
 +  if(LIBXML2_FOUND)
 +    include_directories(${LIBXML2_INCLUDE_DIR})
 +    set(PKG_XML libxml-2.0)
 +    set(XML_LIBRARIES ${LIBXML2_LIBRARIES})
 +  endif(LIBXML2_FOUND)
 +endif(GMX_XML)
 +
  option(GMX_GSL "Add support for gsl" OFF)
  if (GMX_GSL)
    find_package(gsl)
@@@ -652,15 -714,6 +688,14 @@@ gmx_test_isfinite(HAVE_ISFINITE
  gmx_test__isfinite(HAVE__ISFINITE)
  gmx_test__finite(HAVE__FINITE)
  
- gmx_test_cxx11(GMX_CXX11 CXX11_FLAG)
- set(GROMACS_CXX_FLAGS "${CXX11_FLAG} ${GROMACS_CXX_FLAGS}")
 +include(gmxTestCXX11)
++gmx_test_cxx11(GMX_CXX11 GMX_CXX11_FLAGS)
 +if(CXX11_FLAG AND GMX_GPU)
 +    #FIXME: add proper solution for progate all but cxx11 flag
 +    set(CUDA_PROPAGATE_HOST_FLAGS no)
 +    message(WARNING "Please manually add compiler flags to CUDA_NVCC_FLAGS. Automatic propogation temporary not working.")
 +endif()
 +
  include(gmxTestXDR)
  gmx_test_xdr(GMX_SYSTEM_XDR)
  if(NOT GMX_SYSTEM_XDR)
@@@ -674,15 -730,17 +712,15 @@@ if(${GMX_CPU_ACCELERATION} STREQUAL "NO
      # nothing to do
  elseif(${GMX_CPU_ACCELERATION} STREQUAL "SSE2")
  
-     GMX_TEST_CFLAG(GNU_SSE2_CFLAG "-msse2" GROMACS_C_FLAGS)
+     GMX_TEST_CFLAG(GNU_SSE2_CFLAG "-msse2" ACCELERATION_C_FLAGS)
      if(NOT GNU_SSE2_CFLAG AND GMX_NATIVE_WINDOWS)
-         GMX_TEST_CFLAG(MSVC_SSE2_CFLAG "/arch:SSE2" GROMACS_C_FLAGS)
+         GMX_TEST_CFLAG(MSVC_SSE2_CFLAG "/arch:SSE2" ACCELERATION_C_FLAGS)
      endif(NOT GNU_SSE2_CFLAG AND GMX_NATIVE_WINDOWS)
  
-     GMX_TEST_CXXFLAG(GNU_SSE2_CXXFLAG "-msse2" GROMACS_CXX_FLAGS)
 -    if (CMAKE_CXX_COMPILER_LOADED)
 -        GMX_TEST_CXXFLAG(GNU_SSE2_CXXFLAG "-msse2" ACCELERATION_CXX_FLAGS)
 -        if(NOT GNU_SSE2_CXXFLAG AND GMX_NATIVE_WINDOWS)
 -            GMX_TEST_CXXFLAG(MSVC_SSE2_CXXFLAG "/arch:SSE2" ACCELERATION_CXX_FLAGS)
 -        endif(NOT GNU_SSE2_CXXFLAG AND GMX_NATIVE_WINDOWS)
 -    endif()
++    GMX_TEST_CXXFLAG(GNU_SSE2_CXXFLAG "-msse2" ACCELERATION_CXX_FLAGS)
 +    if(NOT GNU_SSE2_CXXFLAG AND GMX_NATIVE_WINDOWS)
-         GMX_TEST_CXXFLAG(MSVC_SSE2_CXXFLAG "/arch:SSE2" GROMACS_CXX_FLAGS)
++        GMX_TEST_CXXFLAG(MSVC_SSE2_CXXFLAG "/arch:SSE2" ACCELERATION_CXX_FLAGS)
 +    endif(NOT GNU_SSE2_CXXFLAG AND GMX_NATIVE_WINDOWS)
  
      # We dont warn for lacking SSE2 flag support, since that is probably standard today.
  
@@@ -715,21 -775,23 +755,21 @@@ elseif(${GMX_CPU_ACCELERATION} STREQUA
          endif()
      endif(NOT GNU_SSE4_CFLAG AND NOT MSVC_SSE4_CFLAG)
  
 -    if (CMAKE_CXX_COMPILER_LOADED)
 -        GMX_TEST_CXXFLAG(GNU_SSE4_CXXFLAG "-msse4.1" GROMACS_CXX_FLAG)
 -        if (NOT GNU_SSE4_CXXFLAG AND GMX_NATIVE_WINDOWS)
 -            GMX_TEST_CXXFLAG(MSVC_SSE4_CXXFLAG "/arch:SSE4.1" ACCELERATION_CXX_FLAGS)
 -        endif(NOT GNU_SSE4_CXXFLAG AND GMX_NATIVE_WINDOWS)
 -        if (NOT GNU_SSE4_CXXFLAG AND NOT MSVC_SSE4_CXXFLAG) 
 -            message(WARNING "No C++ SSE4.1 flag found. Consider a newer compiler, or use SSE2 for slightly lower performance.")
 -            # Not surprising if we end up here! MSVC current does not support the SSE4.1 flag. However, it appears to accept SSE4.1
 -            # intrinsics when SSE2 support is enabled, so we try that instead.
 -            if (GMX_NATIVE_WINDOWS)
 -                GMX_TEST_CXXFLAG(MSVC_SSE2_CXXFLAG "/arch:SSE2" ACCELERATION_CXX_FLAGS)
 -            endif()
 -        endif(NOT GNU_SSE4_CXXFLAG AND NOT MSVC_SSE4_CXXFLAG)
 -    endif()
 +    GMX_TEST_CXXFLAG(GNU_SSE4_CXXFLAG "-msse4.1" GROMACS_CXX_FLAG)
 +    if (NOT GNU_SSE4_CXXFLAG AND GMX_NATIVE_WINDOWS)
-        GMX_TEST_CXXFLAG(MSVC_SSE4_CXXFLAG "/arch:SSE4.1" GROMACS_CXX_FLAGS)
++        GMX_TEST_CXXFLAG(MSVC_SSE4_CXXFLAG "/arch:SSE4.1" ACCELERATION_CXX_FLAGS)
 +    endif(NOT GNU_SSE4_CXXFLAG AND GMX_NATIVE_WINDOWS)
 +    if (NOT GNU_SSE4_CXXFLAG AND NOT MSVC_SSE4_CXXFLAG)
 +        message(WARNING "No C++ SSE4.1 flag found. Consider a newer compiler, or use SSE2 for slightly lower performance.")
 +        # Not surprising if we end up here! MSVC current does not support the SSE4.1 flag. However, it appears to accept SSE4.1
 +        # intrinsics when SSE2 support is enabled, so we try that instead.
 +        if (GMX_NATIVE_WINDOWS)
-             GMX_TEST_CXXFLAG(MSVC_SSE2_CXXFLAG "/arch:SSE2" GROMACS_CXX_FLAGS)
++            GMX_TEST_CXXFLAG(MSVC_SSE2_CXXFLAG "/arch:SSE2" ACCELERATION_CXX_FLAGS)
 +        endif()
 +    endif(NOT GNU_SSE4_CXXFLAG AND NOT MSVC_SSE4_CXXFLAG)
  
      # This must come after we have added the -msse4.1 flag on some platforms.
-     check_include_file(smmintrin.h  HAVE_SMMINTRIN_H ${GROMACS_C_FLAGS})
+     check_include_file(smmintrin.h  HAVE_SMMINTRIN_H ${ACCELERATION_C_FLAGS})
  
      if(NOT HAVE_SMMINTRIN_H)
          message(FATAL_ERROR "Cannot find smmintrin.h, which is required for SSE4.1 intrinsics support.")
@@@ -755,13 -817,15 +795,13 @@@ elseif(${GMX_CPU_ACCELERATION} STREQUA
          message(WARNING "No C AVX flag found. Consider a newer compiler, or try SSE4.1 (lower performance).")
      endif (NOT GNU_AVX_CFLAG AND NOT MSVC_AVX_CFLAG)
  
-     GMX_TEST_CXXFLAG(GNU_AVX_CXXFLAG "-mavx" GROMACS_CXX_FLAGS)
 -    if (CMAKE_CXX_COMPILER_LOADED)
 -        GMX_TEST_CXXFLAG(GNU_AVX_CXXFLAG "-mavx" ACCELERATION_CXX_FLAGS)
 -        if (NOT GNU_AVX_CXXFLAG AND GMX_NATIVE_WINDOWS)
 -            GMX_TEST_CXXFLAG(MSVC_AVX_CXXFLAG "/arch:AVX" ACCELERATION_CXX_FLAGS)
 -        endif (NOT GNU_AVX_CXXFLAG AND GMX_NATIVE_WINDOWS)
 -        if (NOT GNU_AVX_CXXFLAG AND NOT MSVC_AVX_CXXFLAG)
 -            message(WARNING "No C++ AVX flag found. Consider a newer compiler, or try SSE4.1 (lower performance).")
 -        endif (NOT GNU_AVX_CXXFLAG AND NOT MSVC_AVX_CXXFLAG)
 -    endif()
++    GMX_TEST_CXXFLAG(GNU_AVX_CXXFLAG "-mavx" ACCELERATION_CXX_FLAGS)
 +    if (NOT GNU_AVX_CXXFLAG AND GMX_NATIVE_WINDOWS)
-        GMX_TEST_CXXFLAG(MSVC_AVX_CXXFLAG "/arch:AVX" GROMACS_CXX_FLAGS)
++        GMX_TEST_CXXFLAG(MSVC_AVX_CXXFLAG "/arch:AVX" ACCELERATION_CXX_FLAGS)
 +    endif (NOT GNU_AVX_CXXFLAG AND GMX_NATIVE_WINDOWS)
 +    if (NOT GNU_AVX_CXXFLAG AND NOT MSVC_AVX_CXXFLAG)
 +       message(WARNING "No C++ AVX flag found. Consider a newer compiler, or try SSE4.1 (lower performance).")
 +    endif (NOT GNU_AVX_CXXFLAG AND NOT MSVC_AVX_CXXFLAG)
  
      # Set the FMA4 flags (MSVC doesn't require any)
      if(${GMX_CPU_ACCELERATION} STREQUAL "AVX_128_FMA" AND NOT MSVC)
@@@ -1023,30 -1097,34 +1073,33 @@@ endif(GMX_FAHCORE
  
  # # # # # # # # # # NO MORE TESTS AFTER THIS LINE! # # # # # # # # # # #
  # these are set after everything else
- if (NOT DEFINED GROMACS_C_FLAGS_SET)
-     set(GROMACS_C_FLAGS_SET true CACHE INTERNAL "Whether to reset the C flags" 
-         FORCE)
-     set(CMAKE_C_FLAGS "${GROMACS_C_FLAGS} ${CMAKE_C_FLAGS}" CACHE STRING 
-         "Flags used by the compiler during all build types" FORCE)
-     set(CMAKE_CXX_FLAGS "${GROMACS_CXX_FLAGS} ${CMAKE_CXX_FLAGS}" CACHE STRING 
-         "Flags used by the compiler during all build types" FORCE)
-     set(CMAKE_EXE_LINKER_FLAGS 
-         "${GROMACS_LINKER_FLAGS} ${CMAKE_EXE_LINKER_FLAGS}" 
-         CACHE STRING "Linker flags for creating executables" FORCE) 
-     set(CMAKE_SHARED_LINKER_FLAGS 
-         "${GROMACS_LINKER_FLAGS} ${CMAKE_SHARED_LINKER_FLAGS}" 
-         CACHE STRING "Linker flags for creating shared libraries" FORCE) 
- endif (NOT DEFINED GROMACS_C_FLAGS_SET)
+ if (NOT GMX_SKIP_DEFAULT_CFLAGS)
+     set(CMAKE_C_FLAGS "${ACCELERATION_C_FLAGS} ${MPI_COMPILE_FLAGS} ${CMAKE_C_FLAGS}")
 -    set(CMAKE_CXX_FLAGS "${ACCELERATION_CXX_FLAGS} ${MPI_COMPILE_FLAGS} ${CMAKE_CXX_FLAGS}")
++    set(CMAKE_CXX_FLAGS "${ACCELERATION_CXX_FLAGS} ${MPI_COMPILE_FLAGS} ${GMX_CXX11_FLAGS} ${CMAKE_CXX_FLAGS}")
+     set(CMAKE_EXE_LINKER_FLAGS "${MPI_LINKER_FLAGS} ${CMAKE_EXE_LINKER_FLAGS}")
+     set(CMAKE_SHARED_LINKER_FLAGS "${MPI_LINKER_FLAGS} ${CMAKE_SHARED_LINKER_FLAGS}")
+ else()
+     message("Recommended flags which are not added because GMX_SKIP_DEFAULT_CFLAGS=yes:")
+     message("CMAKE_C_FLAGS: ${ACCELERATION_C_FLAGS} ${MPI_COMPILE_FLAGS} ${GMXC_CFLAGS}")
+     message("CMAKE_C_FLAGS_RELEASE: ${GMXC_CFLAGS_RELEASE}")
+     message("CMAKE_C_FLAGS_DEBUG: ${GMXC_CFLAGS_DEBUG}")
 -    if(CMAKE_CXX_COMPILER_LOADED)
 -        message("CMAKE_CXX_FLAGS: ${ACCELERATION_CXX_FLAGS} ${MPI_COMPILE_FLAGS} ${GMXC_CXXFLAGS}")
 -        message("CMAKE_CXX_FLAGS_RELEASE: ${GMXC_CXXFLAGS_RELEASE}")
 -        message("CMAKE_CXX_FLAGS_DEBUG: ${GMXC_CXXFLAGS_DEBUG}")
 -    endif()
++    message("CMAKE_CXX_FLAGS: ${ACCELERATION_CXX_FLAGS} ${MPI_COMPILE_FLAGS} ${GMX_CXX11_FLAGS} ${GMXC_CXXFLAGS}")
++    message("CMAKE_CXX_FLAGS_RELEASE: ${GMXC_CXXFLAGS_RELEASE}")
++    message("CMAKE_CXX_FLAGS_DEBUG: ${GMXC_CXXFLAGS_DEBUG}")
+     message("CMAKE_EXE_LINKER_FLAGS: ${MPI_LINKER_FLAGS}")
+     message("CMAKE_SHARED_LINKER_FLAGS: ${MPI_LINKER_FLAGS}")
+ endif()
  
  if(NOT GMX_OPENMP)
      #Unset all OpenMP flags in case OpenMP was disabled either by the user
      #or because it was only partially detected (e.g. only for C but not C++ compiler)
      unset(OpenMP_C_FLAGS CACHE) 
      unset(OpenMP_CXX_FLAGS CACHE)
 -    unset(OpenMP_LINKER_FLAGS CACHE)
 -    unset(OpenMP_SHARED_LINKER_FLAGS)
 +else()
 +    set(GMX_EXE_LINKER_FLAGS ${GMX_EXE_LINKER_FLAGS} ${OpenMP_LINKER_FLAGS})
 +    set(GMX_SHARED_LINKER_FLAGS ${GMX_SHARED_LINKER_FLAGS} ${OpenMP_SHARED_LINKER_FLAGS})
  endif()
+ set(PKG_CFLAGS "${PKG_CFLAGS} ${OpenMP_C_FLAGS}")
  
  ######################################
  # Output compiler and CFLAGS used
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
index ceea9ca0a76acbf0cacd21cce318252c30e8a83a,0000000000000000000000000000000000000000..f7cb20d2ea89bbbe20eb8ae3c93683cca399bc84
mode 100644,000000..100644
--- /dev/null
@@@ -1,43 -1,0 +1,34 @@@
-     IF(NOT DEFINED HAVE_${VARIABLE})
-         MESSAGE(STATUS "Checking for C++11 support")
-         if(NOT WIN32)
-             set(CXX11_FLAG "-std=c++0x")
-         else()
-             set(CXX11_FLAG "/Qstd=c++0x")
-         endif()
-         CHECK_CXX_COMPILER_FLAG("${CXX11_FLAG}" CXXFLAG_STD_CXX0X)
-         if(NOT CXXFLAG_STD_CXX0X)
-             set(CXX11_FLAG "")
-         endif()
-         set(CMAKE_REQUIRED_DEFINITIONS "${CXX11_FLAG}")
-         check_cxx_source_compiles(
 +include(CheckCXXSourceCompiles)
 +MACRO(GMX_TEST_CXX11 VARIABLE FLAG)
- }" HAVE_${VARIABLE})
-         set(CMAKE_REQUIRED_DEFINITIONS "")
-         if(HAVE_${VARIABLE})
-             set(${VARIABLE} 1 CACHE INTERNAL "Result of C++11 support test" FORCE)
-             set(${FLAG} ${CXX11_FLAG} CACHE INTERNAL "Compiler flag for C++11 support" FORCE)
-             MESSAGE(STATUS "Checking for C++11 support - yes")
-         else()
-             set(${VARIABLE} 0 CACHE INTERNAL "Result of C++11 support test" FORCE)
-             set(${FLAG} "" CACHE INTERNAL "Compiler flag for C++11 support" FORCE)
-             MESSAGE(STATUS "Checking for C++11 support - no")
-         endif()
-     ENDIF(NOT DEFINED HAVE_${VARIABLE})
++    if(NOT WIN32)
++        set(CXX11_FLAG "-std=c++0x")
++    else()
++        set(CXX11_FLAG "/Qstd=c++0x")
++    endif()
++    CHECK_CXX_COMPILER_FLAG("${CXX11_FLAG}" CXXFLAG_STD_CXX0X)
++    if(NOT CXXFLAG_STD_CXX0X)
++        set(CXX11_FLAG "")
++    endif()
++    set(CMAKE_REQUIRED_DEFINITIONS "${CXX11_FLAG}")
++    check_cxx_source_compiles(
 +"#include <vector>
 +#include <memory>
 +#include <utility>
 +struct A {
 +  A(int *i=NULL) : p(i) {} ;
 +  std::unique_ptr<int> p;
 +};
 +int main() {
 +  typedef std::unique_ptr<int> intPointer;
 +  intPointer p(new int(10));
 +  std::vector<intPointer> v;
 +  v.push_back(std::move(p));
 +  std::vector<A> v2;
 +  v2.push_back(A());  //requires default move constructor
 +  v2.push_back(A(new int(5))); //detects bug in ICC
++}" ${VARIABLE})
++    set(CMAKE_REQUIRED_DEFINITIONS "")
++    if(${VARIABLE})
++        set(${FLAG} ${CXX11_FLAG})
++    endif()
 +ENDMACRO()
index 9917088959a64eb63ea7fee6b1894c1e0c0497b1,b57659a9495927e3e7b38ecb9060f5a182907956..bc5f737fba02a62c080b97ccee65776302071f56
@@@ -5,15 -5,41 +5,14 @@@
  # If you only use one shell you can copy that GMXRC.* instead.
  
  
- # only csh/tcsh understand 'set'
- set is_csh = 123
- test "$is_csh" = 123 && goto CSH
+ # only csh/tcsh set the variable $shell (note: lower case!)
+ test $shell && goto CSH
  
  # if we got here, shell is bsh/bash/zsh/ksh
 -# bsh cannot remove part of a variable with %%
 -shtst="A.B"
 -if [ "`(echo ${shtst%%.*}) 2>/dev/null`" = A ]; then
 -
 -  # shell is bash/zsh/ksh
 -  # bash/zsh use $[...] for arithmetic evaluation, ksh doesn't
 -  if [ "`echo $[0+1]`" = 1 ]; then
 -    
 -    # shell is zsh/bash
 -    # zsh can test if the variable shtst is set with ${+shtst}
 -    if [ "`(echo ${+shtst}) 2>/dev/null`" = 1 ]; then
 -      # shell is zsh
 -      source @BIN_INSTALL_DIR@/GMXRC.zsh
 -    else  
 -      # shell is bash
 -      source @BIN_INSTALL_DIR@/GMXRC.bash      
 -    fi
 -
 -  else    
 -    # shell is ksh - use bash setup, completions won't be read.
 -     . @BIN_INSTALL_DIR@/GMXRC.bash
 -  fi
 -  return
 -else
 -  # shell is bsh - use bash setup, completions won't be read.
 -  . @BIN_INSTALL_DIR@/GMXRC.bash
 -  exit
 -fi
 +. @BIN_INSTALL_DIR@/GMXRC.bash
 +return
  
  # csh/tcsh jump here
 -
  CSH:
  source @BIN_INSTALL_DIR@/GMXRC.csh
  
Simple merge
Simple merge
index edf009283fce31d616cc92188a9ddb933cf5157d,0000000000000000000000000000000000000000..425cc60d89b7b24c083de6c8dded03726c38eeee
mode 100644,000000..100644
--- /dev/null
@@@ -1,107 -1,0 +1,103 @@@
- if(GMX_GPU)
-     include_directories(${CUDA_TOOLKIT_INCLUDE})
- endif()
 +set(LIBGROMACS_SOURCES)
 +
 +add_subdirectory(legacyheaders)
 +add_subdirectory(gmxlib)
 +add_subdirectory(mdlib)
 +add_subdirectory(gmxpreprocess)
 +add_subdirectory(analysisdata)
 +add_subdirectory(commandline)
 +add_subdirectory(linearalgebra)
 +add_subdirectory(onlinehelp)
 +add_subdirectory(options)
 +add_subdirectory(selection)
 +add_subdirectory(trajectoryanalysis)
 +add_subdirectory(utility)
 +
 +file(GLOB LIBGROMACS_HEADERS *.h)
 +install(FILES ${LIBGROMACS_HEADERS} DESTINATION ${INCL_INSTALL_DIR}/gromacs
 +        COMPONENT development)
 +
 +list(APPEND LIBGROMACS_SOURCES ${GMXLIB_SOURCES} ${MDLIB_SOURCES})
 +
 +configure_file(${CMAKE_CURRENT_SOURCE_DIR}/version.h.cmakein ${CMAKE_CURRENT_BINARY_DIR}/version.h)
 +install(FILES ${CMAKE_CURRENT_BINARY_DIR}/version.h
 +    DESTINATION ${INCL_INSTALL_DIR}/gromacs
 +    COMPONENT development)
 +
 +# Add target that generates gitversion.c every time make is run
 +# if git version info is requested
 +# This code is here instead of utility/CMakeLists.txt because CMake
 +# ignores set_source_file_properties from subdirectories.
 +if (GMX_GIT_VERSION_INFO)
 +    set(GENERATED_VERSION_FILE ${CMAKE_CURRENT_BINARY_DIR}/utility/gitversion.c)
 +    add_custom_target(gmx_version ALL
 +            COMMAND ${CMAKE_COMMAND}
 +                -D GIT_EXECUTABLE="${GIT_EXECUTABLE}"
 +                -D GIT_VERSION="${GIT_VERSION}"
 +                -D PROJECT_VERSION="${PROJECT_VERSION}"
 +                -D PROJECT_SOURCE_DIR="${PROJECT_SOURCE_DIR}"
 +                -D VERSION_C_CMAKEIN="${CMAKE_CURRENT_SOURCE_DIR}/utility/gitversion.c.cmakein"
 +                -D VERSION_C_OUT=${GENERATED_VERSION_FILE}
 +                -P ${CMAKE_SOURCE_DIR}/cmake/gmxGenerateVersionInfo.cmake
 +            WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
 +            DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/utility/gitversion.c.cmakein
 +            COMMENT "Generating git version information")
 +    set_source_files_properties(${GENERATED_VERSION_FILE}
 +                                PROPERTIES GENERATED true)
 +    list(APPEND LIBGROMACS_SOURCES ${GENERATED_VERSION_FILE})
 +endif()
 +
 +# apply gcc 4.4.x bug workaround
 +if(GMX_USE_GCC44_BUG_WORKAROUND)
 +   include(gmxGCC44O3BugWorkaround)
 +   gmx_apply_gcc44_bug_workaround("gmxlib/bondfree.c")
 +   gmx_apply_gcc44_bug_workaround("mdlib/force.c")
 +   gmx_apply_gcc44_bug_workaround("mdlib/constr.c")
 +endif()
 +
 +add_library(libgromacs ${LIBGROMACS_SOURCES})
 +if (GMX_GIT_VERSION_INFO)
 +    add_dependencies(libgromacs gmx_version)
 +endif ()
 +
 +if(GMX_BUILD_OWN_FFTW)
 +    # This dependency has to be made here rather than the CMakeLists.txt that
 +    # does the FFTW build, because of the order in which
 +    # add_subdirectory() calls are made in the top-level CMakeLists.txt; the
 +    # md library target does not necessarily exist yet. Also enabling and
 +    # disabling GMX_BUILD_OWN_FFTW changes dependencies correctly.
 +    add_dependencies(libgromacs gmxfftw)
 +endif()
 +
 +target_link_libraries(libgromacs ${GMX_GPU_LIBRARIES}
 +                      ${GMX_EXTRA_LIBRARIES} ${FFT_LIBRARIES} ${XML_LIBRARIES}
 +                      ${THREAD_LIB} ${GMX_SHARED_LINKER_FLAGS})
 +set_target_properties(libgromacs PROPERTIES
 +                      OUTPUT_NAME "gromacs${GMX_LIBS_SUFFIX}"
 +                      SOVERSION ${SOVERSION}
 +                      COMPILE_FLAGS "${OpenMP_C_FLAGS}")
 +
 +install(TARGETS libgromacs DESTINATION ${LIB_INSTALL_DIR} COMPONENT libraries)
 +
 +configure_file(${CMAKE_CURRENT_SOURCE_DIR}/libgromacs.pc.cmakein
 +               ${CMAKE_CURRENT_BINARY_DIR}/libgromacs.pc @ONLY)
 +install(FILES ${CMAKE_CURRENT_BINARY_DIR}/libgromacs.pc
 +        DESTINATION ${LIB_INSTALL_DIR}/pkgconfig
 +        RENAME "libgromacs${GMX_LIBS_SUFFIX}.pc"
 +        COMPONENT development)
 +
 +if (INSTALL_CUDART_LIB) #can be set manual by user
 +    if (GMX_OPENMM OR GMX_GPU)
 +        foreach(CUDA_LIB ${CUDA_LIBRARIES})
 +            string(REGEX MATCH "cudart" IS_CUDART ${CUDA_LIB})
 +            if(IS_CUDART) #libcuda should not be installed
 +                #install also name-links (linker uses those)
 +                file(GLOB CUDA_LIBS ${CUDA_LIB}*)
 +                install(FILES ${CUDA_LIBS} DESTINATION
 +                    ${LIB_INSTALL_DIR} COMPONENT libraries)
 +            endif()
 +        endforeach()
 +    else()
 +        message(WARNING "INSTALL_CUDART_LIB only makes sense with GMX_OPENMM or GMX_GPU")
 +    endif()
 +endif ()
index a2bc622e11992ae3712cbae583846bf27b585b30,0000000000000000000000000000000000000000..ffcbdce9e235fe009d0166740754c734811f42a3
mode 100644,000000..100644
--- /dev/null
@@@ -1,365 -1,0 +1,361 @@@
-     upstring(atomnm);
-     upstring(resnm);
 +/*
 + * 
 + *                This source code is part of
 + * 
 + *                 G   R   O   M   A   C   S
 + * 
 + *          GROningen MAchine for Chemical Simulations
 + * 
 + *                        VERSION 3.2.0
 + * Written by David van der Spoel, Erik Lindahl, Berk Hess, and others.
 + * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 + * Copyright (c) 2001-2004, The GROMACS development team,
 + * check out http://www.gromacs.org for more information.
 +
 + * This program is free software; you can redistribute it and/or
 + * modify it under the terms of the GNU General Public License
 + * as published by the Free Software Foundation; either version 2
 + * of the License, or (at your option) any later version.
 + * 
 + * If you want to redistribute modifications, please consider that
 + * scientific software is very special. Version control is crucial -
 + * bugs must be traceable. We will be happy to consider code for
 + * inclusion in the official distribution, but derived work must not
 + * be called official GROMACS. Details are found in the README & COPYING
 + * files - if they are missing, get the official version at www.gromacs.org.
 + * 
 + * To help us fund GROMACS development, we humbly ask that you cite
 + * the papers on the package - you can find them in the top README file.
 + * 
 + * For more info, check our website at http://www.gromacs.org
 + * 
 + * And Hey:
 + * GROningen Mixture of Alchemy and Childrens' Stories
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <ctype.h>
 +#include "sysstuff.h"
 +#include "smalloc.h"
 +#include "string2.h"
 +#include "futil.h"
 +#include "maths.h"
 +#include "gmx_fatal.h"
 +#include "atomprop.h"
 +#include "maths.h"
 +#include "macros.h"
 +#include "index.h"
 +#include "strdb.h"
 +#include "copyrite.h"
 +
 +typedef struct {
 +  gmx_bool   bSet;
 +  int    nprop,maxprop;
 +  char   *db;
 +  double def;
 +  char   **atomnm;
 +  char   **resnm;
 +  gmx_bool   *bAvail;
 +  real   *value;
 +} aprop_t;
 +
 +typedef struct gmx_atomprop {
 +  gmx_bool       bWarned;
 +  aprop_t    prop[epropNR];
 +  gmx_residuetype_t restype;
 +} gmx_atomprop;
 +
 +
 +
 +/* NOTFOUND should be smallest, others larger in increasing priority */
 +enum { NOTFOUND=-4, WILDCARD, WILDPROT, PROTEIN };
 +
 +/* return number of matching characters, 
 +   or NOTFOUND if not at least all characters in char *database match */
 +static int dbcmp_len(char *search, char *database)
 +{
 +  int i;
 +  
 +  i=0;
 +  while(search[i] && database[i] && (search[i]==database[i]) )
 +    i++;
 +  
 +  if (database[i])
 +    i=NOTFOUND;
 +  return i;
 +}
 +
 +static int get_prop_index(aprop_t *ap,gmx_residuetype_t restype,
 +                        char *resnm,char *atomnm,
 +                        gmx_bool *bExact)
 +{
 +  int  i,j=NOTFOUND;
 +  long int alen,rlen;
 +  long int malen,mrlen;
 +  gmx_bool bProtein,bProtWild;
 +  
 +  bProtein  = gmx_residuetype_is_protein(restype,resnm);
 +  bProtWild = (strcmp(resnm,"AAA")==0);
 +  malen = NOTFOUND;
 +  mrlen = NOTFOUND;
 +  for(i=0; (i<ap->nprop); i++) {
 +    rlen = dbcmp_len(resnm, ap->resnm[i]);
 +    if (rlen == NOTFOUND) {
 +      if ( (strcmp(ap->resnm[i],"*")==0) ||
 +         (strcmp(ap->resnm[i],"???")==0) )
 +      rlen=WILDCARD;
 +      else if (strcmp(ap->resnm[i],"AAA")==0)
 +      rlen=WILDPROT;
 +    }
 +    alen = dbcmp_len(atomnm, ap->atomnm[i]);
 +    if ( (alen > NOTFOUND) && (rlen > NOTFOUND)) {
 +      if ( ( (alen > malen) && (rlen >= mrlen)) ||
 +         ( (rlen > mrlen) && (alen >= malen) ) ) {
 +      malen = alen;
 +      mrlen = rlen;
 +      j     = i;
 +      }
 +    }
 +  }
 +  
 +  *bExact = ((malen == (long int)strlen(atomnm)) &&
 +           ((mrlen == (long int)strlen(resnm)) || 
 +            ((mrlen == WILDPROT) && bProtWild) ||
 +            ((mrlen == WILDCARD) && !bProtein && !bProtWild)));
 +  
 +  if (debug) {
 +    fprintf(debug,"searching residue: %4s atom: %4s\n",resnm,atomnm);
 +    if (j == NOTFOUND)
 +      fprintf(debug," not successful\n");
 +    else
 +      fprintf(debug," match: %4s %4s\n",ap->resnm[j],ap->atomnm[j]);
 +  }
 +  return j;
 +}
 +
 +static void add_prop(aprop_t *ap,gmx_residuetype_t restype,
 +                   char *resnm,char *atomnm,
 +                   real p,int line) 
 +{
 +  int  i,j;
 +  gmx_bool bExact;
 +  
 +  j = get_prop_index(ap,restype,resnm,atomnm,&bExact);
 +  
 +  if (!bExact) {
 +    if (ap->nprop >= ap->maxprop) {
 +      ap->maxprop += 10;
 +      srenew(ap->resnm,ap->maxprop);
 +      srenew(ap->atomnm,ap->maxprop);
 +      srenew(ap->value,ap->maxprop);
 +      srenew(ap->bAvail,ap->maxprop);
 +      for(i=ap->nprop; (i<ap->maxprop); i++) {
 +      ap->atomnm[i] = NULL;
 +      ap->resnm[i]  = NULL;
 +      ap->value[i]  = 0;
 +      ap->bAvail[i] = FALSE;
 +      }
 +    }
-   upstring(atomname);
 +    ap->atomnm[ap->nprop] = strdup(atomnm);
 +    ap->resnm[ap->nprop]  = strdup(resnm);
 +    j = ap->nprop;
 +    ap->nprop++;
 +  }
 +  if (ap->bAvail[j]) {
 +    if (ap->value[j] == p)
 +      fprintf(stderr,"Warning double identical entries for %s %s %g on line %d in file %s\n",
 +            resnm,atomnm,p,line,ap->db);
 +    else {
 +      fprintf(stderr,"Warning double different entries %s %s %g and %g on line %d in file %s\n"
 +            "Using last entry (%g)\n",
 +            resnm,atomnm,p,ap->value[j],line,ap->db,p);
 +      ap->value[j] = p;
 +    }
 +  }
 +  else {
 +    ap->bAvail[j] = TRUE;
 +    ap->value[j]  = p;
 +  }
 +}
 +
 +static void read_prop(gmx_atomprop_t aps,int eprop,double factor)
 +{
 +  gmx_atomprop *ap2 = (gmx_atomprop*) aps;
 +  FILE   *fp;
 +  char   line[STRLEN],resnm[32],atomnm[32];
 +  double pp;
 +  int    line_no;
 +  aprop_t *ap;
 +
 +  ap = &ap2->prop[eprop];
 +
 +  fp      = libopen(ap->db);
 +  line_no = 0;
 +  while(get_a_line(fp,line,STRLEN)) {
 +    line_no++;
 +    if (sscanf(line,"%s %s %lf",resnm,atomnm,&pp) == 3) {
 +      pp *= factor;
 +      add_prop(ap,aps->restype,resnm,atomnm,pp,line_no);
 +    }
 +    else 
 +      fprintf(stderr,"WARNING: Error in file %s at line %d ignored\n",
 +            ap->db,line_no);
 +  }
 +      
 +  /* for libraries we can use the low-level close routines */
 +  ffclose(fp);
 +
 +  ap->bSet = TRUE;
 +}
 +
 +static void set_prop(gmx_atomprop_t aps,int eprop) 
 +{
 +  gmx_atomprop *ap2 = (gmx_atomprop*) aps;
 +  const char *fns[epropNR]  = { "atommass.dat", "vdwradii.dat", "dgsolv.dat", "electroneg.dat", "elements.dat" };
 +  double fac[epropNR] = { 1.0,    1.0,  418.4, 1.0, 1.0 };
 +  double def[epropNR] = { 12.011, 0.14, 0.0, 2.2, -1 };
 +  aprop_t *ap;
 +
 +  ap = &ap2->prop[eprop];
 +  if (!ap->bSet) {
 +    ap->db  = strdup(fns[eprop]);
 +    ap->def = def[eprop];
 +    read_prop(aps,eprop,fac[eprop]);
 +    
 +    if (debug)
 +      fprintf(debug,"Entries in %s: %d\n",ap->db,ap->nprop);
 +
 +    if ( ( (!aps->bWarned) && (eprop == epropMass) ) || (eprop == epropVDW)) {
 +      printf("\n"
 +             "WARNING: Masses and atomic (Van der Waals) radii will be guessed\n"
 +             "         based on residue and atom names, since they could not be\n"
 +             "         definitively assigned from the information in your input\n"
 +             "         files. These guessed numbers might deviate from the mass\n"
 +             "         and radius of the atom type. Please check the output\n"
 +             "         files if necessary.\n\n");
 +      aps->bWarned = TRUE;
 +    }
 +  }
 +}
 +
 +gmx_atomprop_t gmx_atomprop_init(void)
 +{
 +  gmx_atomprop *aps;
 +  int p;
 +
 +  snew(aps,1);
 +
 +  gmx_residuetype_init(&aps->restype);
 +  aps->bWarned = FALSE;
 +
 +  return (gmx_atomprop_t)aps;
 +}
 +
 +static void destroy_prop(aprop_t *ap)
 +{
 +  int i;
 +
 +  if (ap->bSet) {
 +    sfree(ap->db);
 +    
 +    for(i=0; i<ap->nprop; i++) {
 +      sfree(ap->atomnm[i]);
 +      sfree(ap->resnm[i]);
 +    }
 +    sfree(ap->atomnm);
 +    sfree(ap->resnm);
 +    sfree(ap->bAvail);
 +    sfree(ap->value);
 +  }
 +}
 +
 +void gmx_atomprop_destroy(gmx_atomprop_t aps)
 +{
 +  gmx_atomprop *ap = (gmx_atomprop*) aps;
 +  int p;
 +
 +  if (aps == NULL) {
 +    printf("\nWARNING: gmx_atomprop_destroy called with a NULL pointer\n\n");
 +    return;
 +  }
 +
 +  for(p=0; p<epropNR; p++) {
 +    destroy_prop(&ap->prop[p]);
 +  }
 +
 +  gmx_residuetype_destroy(ap->restype);
 +
 +  sfree(ap);
 +}
 +
 +gmx_bool gmx_atomprop_query(gmx_atomprop_t aps,
 +                      int eprop,const char *resnm,const char *atomnm,
 +                      real *value)
 +{
 +  gmx_atomprop *ap = (gmx_atomprop*) aps;
 +  size_t i;
 +  int  j;
 +#define MAXQ 32
 +  char atomname[MAXQ],resname[MAXQ];
 +  gmx_bool bExact;
 +
 +  set_prop(aps,eprop);
 +  if ((strlen(atomnm) > MAXQ-1) || (strlen(resnm) > MAXQ-1)) {
 +    if (debug)
 +      fprintf(debug,"WARNING: will only compare first %d characters\n",
 +            MAXQ-1);
 +  }
 +  if (isdigit(atomnm[0])) {
 +    /* put digit after atomname */
 +    for (i=1; (i<min(MAXQ-1,strlen(atomnm))); i++)
 +      atomname[i-1] = atomnm[i];
 +    atomname[i-1] = atomnm[0];
 +    atomname[i]   = '\0';
 +  } 
 +  else { 
 +    strncpy(atomname,atomnm,MAXQ-1);
 +  }
-   upstring(resname);
 +  strncpy(resname,resnm,MAXQ-1);
 +  
 +  j = get_prop_index(&(ap->prop[eprop]),ap->restype,resname,
 +                   atomname,&bExact);
 +  
 +  if (j >= 0) {
 +    *value = ap->prop[eprop].value[j];
 +    return TRUE;
 +  }
 +  else {
 +    *value = ap->prop[eprop].def;
 +    return FALSE;
 +  }
 +}
 +
 +char *gmx_atomprop_element(gmx_atomprop_t aps,int atomnumber)
 +{
 +  gmx_atomprop *ap = (gmx_atomprop*) aps;
 +  int i;
 +  
 +  set_prop(aps,epropElement);
 +  for(i=0; (i<ap->prop[epropElement].nprop); i++) {
 +    if (gmx_nint(ap->prop[epropElement].value[i]) == atomnumber) {
 +      return ap->prop[epropElement].atomnm[i];
 +    }
 +  }
 +  return NULL;
 +}
 +
 +int gmx_atomprop_atomnumber(gmx_atomprop_t aps,const char *elem)
 +{
 +  gmx_atomprop *ap = (gmx_atomprop*) aps;
 +  int i;
 +  
 +  set_prop(aps,epropElement);
 +  for(i=0; (i<ap->prop[epropElement].nprop); i++) {
 +    if (gmx_strcasecmp(ap->prop[epropElement].atomnm[i],elem) == 0) {
 +      return gmx_nint(ap->prop[epropElement].value[i]);
 +    }
 +  }
 +  return NOTSET;
 +}
index 5ae98d0047faf2763f632fe0d5134eccc59aa19a,0000000000000000000000000000000000000000..704310a81adf5ecc2e74f40176c2735aa23af84f
mode 100644,000000..100644
--- /dev/null
@@@ -1,2403 -1,0 +1,2533 @@@
- static const int cpt_version = 14;
 +/* -*- mode: c; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4; c-file-style: "stroustrup"; -*-
 + *
 + * 
 + * This file is part of Gromacs        Copyright (c) 1991-2008
 + * David van der Spoel, Erik Lindahl, Berk Hess, University of Groningen.
 + *
 + * This program is free software; you can redistribute it and/or
 + * modify it under the terms of the GNU General Public License
 + * as published by the Free Software Foundation; either version 2
 + * of the License, or (at your option) any later version.
 + *
 + * To help us fund GROMACS development, we humbly ask that you cite
 + * the research papers on the package. Check out http://www.gromacs.org
 + * 
 + * And Hey:
 + * Gnomes, ROck Monsters And Chili Sauce
 + */
 +
 +/* The source code in this file should be thread-safe. 
 + Please keep it that way. */
 +
 +
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <string.h>
 +#include <time.h>
 +
 +#ifdef HAVE_SYS_TIME_H
 +#include <sys/time.h>
 +#endif
 +
 +#ifdef HAVE_UNISTD_H
 +#include <unistd.h>
 +#endif
 +
 +#ifdef GMX_NATIVE_WINDOWS
 +/* _chsize_s */
 +#include <io.h>
 +#include <sys/locking.h>
 +#endif
 +
 +
 +#include "filenm.h"
 +#include "names.h"
 +#include "typedefs.h"
 +#include "smalloc.h"
 +#include "gmxfio.h"
 +#include "xdrf.h"
 +#include "statutil.h"
 +#include "txtdump.h"
 +#include "vec.h"
 +#include "network.h"
 +#include "gmx_random.h"
 +#include "checkpoint.h"
 +#include "futil.h"
 +#include "string2.h"
 +#include <fcntl.h>
 +
 +#include "buildinfo.h"
 +
 +#ifdef GMX_FAHCORE
 +#include "corewrap.h"
 +#endif
 +
 +
 +/* Portable version of ctime_r implemented in src/gmxlib/string2.c, but we do not want it declared in public installed headers */
 +char *
 +gmx_ctime_r(const time_t *clock,char *buf, int n);
 +
 +
 +#define CPT_MAGIC1 171817
 +#define CPT_MAGIC2 171819
 +#define CPTSTRLEN 1024
 +
 +#ifdef GMX_DOUBLE
 +#define GMX_CPT_BUILD_DP 1
 +#else
 +#define GMX_CPT_BUILD_DP 0
 +#endif
 +
 +/* cpt_version should normally only be changed
 + * when the header of footer format changes.
 + * The state data format itself is backward and forward compatible.
 + * But old code can not read a new entry that is present in the file
 + * (but can read a new format when new entries are not present).
 + */
-                   &fflags,&flags_eks,&flags_enh,&flags_dfh,NULL);
++static const int cpt_version = 15;
 +
 +
 +const char *est_names[estNR]=
 +{
 +    "FE-lambda",
 +    "box", "box-rel", "box-v", "pres_prev",
 +    "nosehoover-xi", "thermostat-integral",
 +    "x", "v", "SDx", "CGp", "LD-rng", "LD-rng-i",
 +    "disre_initf", "disre_rm3tav",
 +    "orire_initf", "orire_Dtav",
 +    "svir_prev", "nosehoover-vxi", "v_eta", "vol0", "nhpres_xi", "nhpres_vxi", "fvir_prev","fep_state", "MC-rng", "MC-rng-i"
 +};
 +
 +enum { eeksEKIN_N, eeksEKINH, eeksDEKINDL, eeksMVCOS, eeksEKINF, eeksEKINO, eeksEKINSCALEF, eeksEKINSCALEH, eeksVSCALE, eeksEKINTOTAL, eeksNR };
 +
 +const char *eeks_names[eeksNR]=
 +{
 +    "Ekin_n", "Ekinh", "dEkindlambda", "mv_cos",
 +    "Ekinf", "Ekinh_old", "EkinScaleF_NHC", "EkinScaleH_NHC","Vscale_NHC","Ekin_Total"
 +};
 +
 +enum { eenhENERGY_N, eenhENERGY_AVER, eenhENERGY_SUM, eenhENERGY_NSUM,
 +       eenhENERGY_SUM_SIM, eenhENERGY_NSUM_SIM,
 +       eenhENERGY_NSTEPS, eenhENERGY_NSTEPS_SIM, 
 +       eenhENERGY_DELTA_H_NN,
 +       eenhENERGY_DELTA_H_LIST, 
 +       eenhENERGY_DELTA_H_STARTTIME, 
 +       eenhENERGY_DELTA_H_STARTLAMBDA, 
 +       eenhNR };
 +
 +const char *eenh_names[eenhNR]=
 +{
 +    "energy_n", "energy_aver", "energy_sum", "energy_nsum",
 +    "energy_sum_sim", "energy_nsum_sim",
 +    "energy_nsteps", "energy_nsteps_sim", 
 +    "energy_delta_h_nn",
 +    "energy_delta_h_list", 
 +    "energy_delta_h_start_time", 
 +    "energy_delta_h_start_lambda"
 +};
 +
 +/* free energy history variables -- need to be preserved over checkpoint */
 +enum { edfhBEQUIL,edfhNATLAMBDA,edfhWLHISTO,edfhWLDELTA,edfhSUMWEIGHTS,edfhSUMDG,edfhSUMMINVAR,edfhSUMVAR,
 +       edfhACCUMP,edfhACCUMM,edfhACCUMP2,edfhACCUMM2,edfhTIJ,edfhTIJEMP,edfhNR };
 +/* free energy history variable names  */
 +const char *edfh_names[edfhNR]=
 +{
 +    "bEquilibrated","N_at_state", "Wang-Landau_Histogram", "Wang-Landau-delta", "Weights", "Free Energies", "minvar","variance",
 +    "accumulated_plus", "accumulated_minus", "accumulated_plus_2",  "accumulated_minus_2", "Tij", "Tij_empirical"
 +};
 +
 +#ifdef GMX_NATIVE_WINDOWS
 +static int
 +gmx_wintruncate(const char *filename, __int64 size)
 +{
 +#ifdef GMX_FAHCORE
 +    /*we do this elsewhere*/
 +    return 0;
 +#else
 +    FILE *fp;
 +    int   rc;
 +    
 +    fp=fopen(filename,"rb+");
 +    
 +    if(fp==NULL)
 +    {
 +        return -1;
 +    }
 +    
 +    return _chsize_s( fileno(fp), size);
 +#endif
 +}
 +#endif
 +
 +
 +enum { ecprREAL, ecprRVEC, ecprMATRIX };
 +
 +enum { cptpEST, cptpEEKS, cptpEENH, cptpEDFH };
 +/* enums for the different components of checkpoint variables, replacing the hard coded ones.
 +   cptpEST - state variables.
 +   cptpEEKS - Kinetic energy state variables.
 +   cptpEENH - Energy history state variables.
 +   cptpEDFH - free energy history variables.
 +*/
 +
 +
 +static const char *st_names(int cptp,int ecpt)
 +{
 +    switch (cptp)
 +    {
 +    case cptpEST: return est_names [ecpt]; break;
 +    case cptpEEKS: return eeks_names[ecpt]; break;
 +    case cptpEENH: return eenh_names[ecpt]; break;
 +    case cptpEDFH: return edfh_names[ecpt]; break;
 +    }
 +
 +    return NULL;
 +}
 +
 +static void cp_warning(FILE *fp)
 +{
 +    fprintf(fp,"\nWARNING: Checkpoint file is corrupted or truncated\n\n");
 +}
 +
 +static void cp_error()
 +{
 +    gmx_fatal(FARGS,"Checkpoint file corrupted/truncated, or maybe you are out of disk space?");
 +}
 +
 +static void do_cpt_string_err(XDR *xd,gmx_bool bRead,const char *desc,char **s,FILE *list)
 +{
 +    bool_t res=0;
 +    
 +    if (bRead)
 +    {
 +        snew(*s,CPTSTRLEN);
 +    }
 +    res = xdr_string(xd,s,CPTSTRLEN);
 +    if (res == 0)
 +    {
 +        cp_error();
 +    }
 +    if (list)
 +    {
 +        fprintf(list,"%s = %s\n",desc,*s);
 +        sfree(*s);
 +    }
 +}
 +
 +static int do_cpt_int(XDR *xd,const char *desc,int *i,FILE *list)
 +{
 +    bool_t res=0;
 +    
 +    res = xdr_int(xd,i);
 +    if (res == 0)
 +    {
 +        return -1;
 +    }
 +    if (list)
 +    {
 +        fprintf(list,"%s = %d\n",desc,*i);
 +    }
 +    return 0;
 +}
 +
 +static int do_cpt_u_chars(XDR *xd,const char *desc,int n,unsigned char *i,FILE *list)
 +{
 +    bool_t res=1;
 +    int j;
 +    if (list)
 +    {
 +        fprintf(list,"%s = ",desc);
 +    }
 +    for (j=0; j<n && res; j++)
 +    {
 +        res &= xdr_u_char(xd,&i[j]);
 +        if (list)
 +        {
 +            fprintf(list,"%02x",i[j]);
 +        }
 +    }
 +    if (list)
 +    {
 +        fprintf(list,"\n");
 +    }
 +    if (res == 0)
 +    {
 +        return -1;
 +    }
 +
 +    return 0;
 +}
 +
 +static void do_cpt_int_err(XDR *xd,const char *desc,int *i,FILE *list)
 +{
 +    if (do_cpt_int(xd,desc,i,list) < 0)
 +    {
 +        cp_error();
 +    }
 +}
 +
 +static void do_cpt_step_err(XDR *xd,const char *desc,gmx_large_int_t *i,FILE *list)
 +{
 +    bool_t res=0;
 +    char   buf[STEPSTRSIZE];
 +
 +    res = xdr_gmx_large_int(xd,i,"reading checkpoint file");
 +    if (res == 0)
 +    {
 +        cp_error();
 +    }
 +    if (list)
 +    {
 +        fprintf(list,"%s = %s\n",desc,gmx_step_str(*i,buf));
 +    }
 +}
 +
 +static void do_cpt_double_err(XDR *xd,const char *desc,double *f,FILE *list)
 +{
 +    bool_t res=0;
 +    
 +    res = xdr_double(xd,f);
 +    if (res == 0)
 +    {
 +        cp_error();
 +    }
 +    if (list)
 +    {
 +        fprintf(list,"%s = %f\n",desc,*f);
 +    }
 +}
 +
++/* Read or write a single real via XDR; any failure is fatal.
++ * The on-file width follows the compile-time precision (GMX_DOUBLE),
++ * i.e. no single<->double conversion is done here, unlike in
++ * do_cpte_reals_low.  desc is currently unused (no listing variant).
++ */
++static void do_cpt_real_err(XDR *xd,const char *desc,real *f)
++{
++    bool_t res=0;
++
++#ifdef GMX_DOUBLE
++    res = xdr_double(xd,f);
++#else
++    res = xdr_float(xd,f);
++#endif
++    if (res == 0)
++    {
++        cp_error();
++    }
++}
++
++/* Read or write n rvecs element-by-element in the build's native
++ * precision; any XDR failure is fatal.  If list is non-NULL the
++ * vectors are pretty-printed afterwards (in read mode this prints
++ * what was just read into f).
++ */
++static void do_cpt_n_rvecs_err(XDR *xd,const char *desc,int n, rvec f[],FILE *list)
++{
++    int i,j;
++
++    for (i=0; i<n; i++)
++    {
++        for (j=0; j<DIM; j++)
++        {
++            do_cpt_real_err(xd, desc, &f[i][j]);
++        }
++    }
++
++    if (list)
++    {
++        pr_rvecs(list,0,desc,f,n);
++    }
++}
++
 +/* Low-level worker: read or write an array of reals.
 + *
 + * If nval >= 0, nval is the element count; on read the count stored
 + * in the file must match or we exit with a fatal error.
 + * If nval < 0, *nptr is used instead; on read the file count is
 + * stored in *nptr.
 + *
 + * The on-file element type (float/double) is stored next to the data;
 + * on a precision mismatch the values are converted (with a warning on
 + * stderr), so single- and double-precision builds can read each
 + * other's checkpoints.
 + *
 + * If list is non-NULL we are in listing mode: the data is printed and
 + * discarded.  Data is also read into a throwaway buffer when the
 + * corresponding bit of sflags is clear.  erealtype (ecprREAL /
 + * ecprRVEC) only selects the print format.
 + *
 + * Returns 0 on success, -1 on XDR failure.
 + */
 +static int do_cpte_reals_low(XDR *xd,int cptp,int ecpt,int sflags,
 +                             int nval,int *nptr,real **v,
 +                             FILE *list,int erealtype)
 +{
 +    bool_t res=0;
 +#ifndef GMX_DOUBLE
 +    int  dtc=xdr_datatype_float; 
 +#else
 +    int  dtc=xdr_datatype_double;
 +#endif
 +    real *vp,*va=NULL;
 +    float  *vf;
 +    double *vd;
 +    int  nf,dt,i;
 +    
 +    if (list == NULL)
 +    {
 +        if (nval >= 0)
 +        {
 +            nf = nval;
 +        }
 +        else
 +        {
 +        /* NOTE(review): the message below says "*ntpr" -- typo for "*nptr" */
 +        if (nptr == NULL)
 +        {
 +            gmx_incons("*ntpr=NULL in do_cpte_reals_low");
 +        }
 +        nf = *nptr;
 +        }
 +    }
 +    /* element count (written, or overwritten on read) */
 +    res = xdr_int(xd,&nf);
 +    if (res == 0)
 +    {
 +        return -1;
 +    }
 +    if (list == NULL)
 +    {
 +        if (nval >= 0)
 +        {
 +            if (nf != nval)
 +            {
 +                gmx_fatal(FARGS,"Count mismatch for state entry %s, code count is %d, file count is %d\n",st_names(cptp,ecpt),nval,nf);
 +            }
 +        }
 +        else
 +        {
 +            *nptr = nf;
 +        }
 +    }
 +    /* datatype tag: float or double as stored in the file */
 +    dt = dtc;
 +    res = xdr_int(xd,&dt);
 +    if (res == 0)
 +    {
 +        return -1;
 +    }
 +    if (dt != dtc)
 +    {
 +        fprintf(stderr,"Precision mismatch for state entry %s, code precision is %s, file precision is %s\n",
 +                st_names(cptp,ecpt),xdr_datatype_names[dtc],
 +                xdr_datatype_names[dt]);
 +    }
 +    if (list || !(sflags & (1<<ecpt)))
 +    {
 +        /* throwaway buffer: listing mode, or entry not wanted */
 +        snew(va,nf);
 +        vp = va;
 +    }
 +    else
 +    {
 +        if (*v == NULL)
 +        {
 +            snew(*v,nf);
 +        }
 +        vp = *v;
 +    }
 +    if (dt == xdr_datatype_float)
 +    {
 +        if (dtc == xdr_datatype_float)
 +        {
 +            /* file and code agree: (de)serialize in place */
 +            vf = (float *)vp;
 +        }
 +        else
 +        {
 +            snew(vf,nf);
 +        }
 +        res = xdr_vector(xd,(char *)vf,nf,
 +                         (unsigned int)sizeof(float),(xdrproc_t)xdr_float);
 +        if (res == 0)
 +        {
 +            return -1;
 +        }
 +        if (dtc != xdr_datatype_float)
 +        {
 +            /* widen float file data to double code data */
 +            for(i=0; i<nf; i++)
 +            {
 +                vp[i] = vf[i];
 +            }
 +            sfree(vf);
 +        }
 +    }
 +    else
 +    {
 +        if (dtc == xdr_datatype_double)
 +        {
 +            vd = (double *)vp;
 +        }
 +        else
 +        {
 +            snew(vd,nf);
 +        }
 +        res = xdr_vector(xd,(char *)vd,nf,
 +                         (unsigned int)sizeof(double),(xdrproc_t)xdr_double);
 +        if (res == 0)
 +        {
 +            return -1;
 +        }
 +        if (dtc != xdr_datatype_double)
 +        {
 +            /* narrow double file data to float code data */
 +            for(i=0; i<nf; i++)
 +            {
 +                vp[i] = vd[i];
 +            }
 +            sfree(vd);
 +        }
 +    }
 +    
 +    if (list)
 +    {
 +        switch (erealtype)
 +        {
 +        case ecprREAL:
 +            pr_reals(list,0,st_names(cptp,ecpt),vp,nf);
 +            break;
 +        case ecprRVEC:
 +            pr_rvecs(list,0,st_names(cptp,ecpt),(rvec *)vp,nf/3);
 +            break;
 +        default:
 +            gmx_incons("Unknown checkpoint real type");
 +        }
 +    }
 +    if (va)
 +    {
 +        sfree(va);
 +    }
 +
 +    return 0;
 +}
 +
 +
 +/* This function stores n along with the reals for reading,
 + * but on reading it assumes that n matches the value in the checkpoint file,
 + * a fatal error is generated when this is not the case.
 + */
 +static int do_cpte_reals(XDR *xd,int cptp,int ecpt,int sflags,
 +                         int n,real **v,FILE *list)
 +{
 +    return do_cpte_reals_low(xd,cptp,ecpt,sflags,n,NULL,v,list,ecprREAL);
 +}
 +
 +/* This function does the same as do_cpte_reals,
 + * except that on reading it ignores the passed value of *n
 + * and stored the value read from the checkpoint file in *n.
 + */
 +static int do_cpte_n_reals(XDR *xd,int cptp,int ecpt,int sflags,
 +                           int *n,real **v,FILE *list)
 +{
 +    return do_cpte_reals_low(xd,cptp,ecpt,sflags,-1,n,v,list,ecprREAL);
 +}
 +
 +static int do_cpte_real(XDR *xd,int cptp,int ecpt,int sflags,
 +                        real *r,FILE *list)
 +{
 +    int n;
 +
 +    return do_cpte_reals_low(xd,cptp,ecpt,sflags,1,NULL,&r,list,ecprREAL);
 +}
 +
 +/* Read or write an array of n ints.  Unlike the real variant there is
 + * no type conversion: a type-tag mismatch is fatal.  v may be NULL to
 + * read into (and discard) a temporary, e.g. when skipping RNG data.
 + * Returns 0 on success, -1 on XDR failure.
 + */
 +static int do_cpte_ints(XDR *xd,int cptp,int ecpt,int sflags,
 +                        int n,int **v,FILE *list)
 +{
 +    bool_t res=0;
 +    int  dtc=xdr_datatype_int;
 +    int *vp,*va=NULL;
 +    int  nf,dt,i;
 +    
 +    nf = n;
 +    res = xdr_int(xd,&nf);
 +    if (res == 0)
 +    {
 +        return -1;
 +    }
 +    /* count check only applies when we actually keep the data */
 +    if (list == NULL && v != NULL && nf != n)
 +    {
 +        gmx_fatal(FARGS,"Count mismatch for state entry %s, code count is %d, file count is %d\n",st_names(cptp,ecpt),n,nf);
 +    }
 +    dt = dtc;
 +    res = xdr_int(xd,&dt);
 +    if (res == 0)
 +    {
 +        return -1;
 +    }
 +    if (dt != dtc)
 +    {
 +        gmx_fatal(FARGS,"Type mismatch for state entry %s, code type is %s, file type is %s\n",
 +                  st_names(cptp,ecpt),xdr_datatype_names[dtc],
 +                  xdr_datatype_names[dt]);
 +    }
 +    if (list || !(sflags & (1<<ecpt)) || v == NULL)
 +    {
 +        /* throwaway buffer: listing mode or data not wanted */
 +        snew(va,nf);
 +        vp = va;
 +    }
 +    else
 +    {
 +        if (*v == NULL)
 +        {
 +            snew(*v,nf);
 +        }
 +        vp = *v;
 +    }
 +    res = xdr_vector(xd,(char *)vp,nf,
 +                     (unsigned int)sizeof(int),(xdrproc_t)xdr_int);
 +    if (res == 0)
 +    {
 +        return -1;
 +    }
 +    if (list)
 +    {
 +        pr_ivec(list,0,st_names(cptp,ecpt),vp,nf,TRUE);
 +    }
 +    if (va)
 +    {
 +        sfree(va);
 +    }
 +
 +    return 0;
 +}
 +
 +static int do_cpte_int(XDR *xd,int cptp,int ecpt,int sflags,
 +                       int *i,FILE *list)
 +{
 +    return do_cpte_ints(xd,cptp,ecpt,sflags,1,&i,list);
 +}
 +
 +/* Read or write an array of n doubles.  Always stored as doubles
 + * regardless of the build precision; a type-tag mismatch is fatal.
 + * Returns 0 on success, -1 on XDR failure.
 + */
 +static int do_cpte_doubles(XDR *xd,int cptp,int ecpt,int sflags,
 +                           int n,double **v,FILE *list)
 +{
 +    bool_t res=0;
 +    int  dtc=xdr_datatype_double;
 +    double *vp,*va=NULL;
 +    int  nf,dt,i;
 +    
 +    nf = n;
 +    res = xdr_int(xd,&nf);
 +    if (res == 0)
 +    {
 +        return -1;
 +    }
 +    if (list == NULL && nf != n)
 +    {
 +        gmx_fatal(FARGS,"Count mismatch for state entry %s, code count is %d, file count is %d\n",st_names(cptp,ecpt),n,nf);
 +    }
 +    dt = dtc;
 +    res = xdr_int(xd,&dt);
 +    if (res == 0)
 +    {
 +        return -1;
 +    }
 +    if (dt != dtc)
 +    {
 +        gmx_fatal(FARGS,"Precision mismatch for state entry %s, code precision is %s, file precision is %s\n",
 +                  st_names(cptp,ecpt),xdr_datatype_names[dtc],
 +                  xdr_datatype_names[dt]);
 +    }
 +    if (list || !(sflags & (1<<ecpt)))
 +    {
 +        /* throwaway buffer: listing mode or entry not wanted */
 +        snew(va,nf);
 +        vp = va;
 +    }
 +    else
 +    {
 +        if (*v == NULL)
 +        {
 +            snew(*v,nf);
 +        }
 +        vp = *v;
 +    }
 +    res = xdr_vector(xd,(char *)vp,nf,
 +                     (unsigned int)sizeof(double),(xdrproc_t)xdr_double);
 +    if (res == 0)
 +    {
 +        return -1;
 +    }
 +    if (list)
 +    {
 +        pr_doubles(list,0,st_names(cptp,ecpt),vp,nf);
 +    }
 +    if (va)
 +    {
 +        sfree(va);
 +    }
 +
 +    return 0;
 +}
 +
 +static int do_cpte_double(XDR *xd,int cptp,int ecpt,int sflags,
 +                          double *r,FILE *list)
 +{
 +    return do_cpte_doubles(xd,cptp,ecpt,sflags,1,&r,list);
 +}
 +
 +
 +static int do_cpte_rvecs(XDR *xd,int cptp,int ecpt,int sflags,
 +                         int n,rvec **v,FILE *list)
 +{
 +    int n3;
 +
 +    return do_cpte_reals_low(xd,cptp,ecpt,sflags,
 +                             n*DIM,NULL,(real **)v,list,ecprRVEC);
 +}
 +
 +static int do_cpte_matrix(XDR *xd,int cptp,int ecpt,int sflags,
 +                          matrix v,FILE *list)
 +{
 +    real *vr;
 +    real ret;
 +
 +    vr = (real *)&(v[0][0]);
 +    ret = do_cpte_reals_low(xd,cptp,ecpt,sflags,
 +                            DIM*DIM,NULL,&vr,NULL,ecprMATRIX);
 +    
 +    if (list && ret == 0)
 +    {
 +        pr_rvecs(list,0,st_names(cptp,ecpt),v,DIM);
 +    }
 +    
 +    return ret;
 +}
 +
 +
 +static int do_cpte_nmatrix(XDR *xd,int cptp,int ecpt,int sflags,
 +                           int n, real **v,FILE *list)
 +{
 +    int i;
 +    real *vr;
 +    real ret,reti;
 +    char name[CPTSTRLEN];
 +
 +    ret = 0;
 +    if (v==NULL)
 +    {
 +        snew(v,n);
 +    }
 +    for (i=0;i<n;i++)
 +    {
 +        reti = 0;
 +        vr = v[i];
 +        reti = do_cpte_reals_low(xd,cptp,ecpt,sflags,n,NULL,&(v[i]),NULL,ecprREAL);
 +        if (list && reti == 0)
 +        {
 +            sprintf(name,"%s[%d]",st_names(cptp,ecpt),i);
 +            pr_reals(list,0,name,v[i],n);
 +        }
 +        if (reti == 0)
 +        {
 +            ret = 0;
 +        }
 +    }
 +    return ret;
 +}
 +
 +/* Read or write an array of n 3x3 matrices.  The matrices are
 + * flattened row-major into one real array of nf*DIM*DIM elements,
 + * passed through the real worker, and unflattened again (so on read
 + * the data ends up back in *v).  Returns 0 on success, -1 on XDR
 + * failure.
 + */
 +static int do_cpte_matrices(XDR *xd,int cptp,int ecpt,int sflags,
 +                            int n,matrix **v,FILE *list)
 +{
 +    bool_t res=0;
 +    matrix *vp,*va=NULL;
 +    real *vr;
 +    int  nf,i,j,k;
 +    int  ret;
 +
 +    nf = n;
 +    res = xdr_int(xd,&nf);
 +    if (res == 0)
 +    {
 +        return -1;
 +    }
 +    if (list == NULL && nf != n)
 +    {
 +        gmx_fatal(FARGS,"Count mismatch for state entry %s, code count is %d, file count is %d\n",st_names(cptp,ecpt),n,nf);
 +    }
 +    if (list || !(sflags & (1<<ecpt)))
 +    {
 +        /* throwaway buffer: listing mode or entry not wanted */
 +        snew(va,nf);
 +        vp = va;
 +    }
 +    else
 +    {
 +        if (*v == NULL)
 +        {
 +            snew(*v,nf);
 +        }
 +        vp = *v;
 +    }
 +    /* flatten: vr[(i*DIM+j)*DIM+k] = vp[i][j][k] */
 +    snew(vr,nf*DIM*DIM);
 +    for(i=0; i<nf; i++)
 +    {
 +        for(j=0; j<DIM; j++)
 +        {
 +            for(k=0; k<DIM; k++)
 +            {
 +                vr[(i*DIM+j)*DIM+k] = vp[i][j][k];
 +            }
 +        }
 +    }
 +    ret = do_cpte_reals_low(xd,cptp,ecpt,sflags,
 +                            nf*DIM*DIM,NULL,&vr,NULL,ecprMATRIX);
 +    /* unflatten: copies read data back on read; a no-op data-wise on write */
 +    for(i=0; i<nf; i++)
 +    {
 +        for(j=0; j<DIM; j++)
 +        {
 +            for(k=0; k<DIM; k++)
 +            {
 +                vp[i][j][k] = vr[(i*DIM+j)*DIM+k];
 +            }
 +        }
 +    }
 +    sfree(vr);
 +    
 +    if (list && ret == 0)
 +    {
 +        for(i=0; i<nf; i++)
 +        {
 +            pr_rvecs(list,0,st_names(cptp,ecpt),vp[i],DIM);
 +        }
 +    }
 +    if (va)
 +    {
 +        sfree(va);
 +    }
 +    
 +    return ret;
 +}
 +
 +/* Read or write the checkpoint file header.
 + * On write the current values are stored; on read they are returned
 + * through the pointer arguments.  Fields introduced in later
 + * checkpoint versions are gated on *file_version and get
 + * backward-compatible defaults when absent.  Any failure is fatal.
 + */
 +static void do_cpt_header(XDR *xd,gmx_bool bRead,int *file_version,
 +                          char **version,char **btime,char **buser,char **bhost,
 +                          int *double_prec,
 +                          char **fprog,char **ftime,
 +                          int *eIntegrator,int *simulation_part,
 +                          gmx_large_int_t *step,double *t,
 +                          int *nnodes,int *dd_nc,int *npme,
 +                          int *natoms,int *ngtc, int *nnhpres, int *nhchainlength,
 +                          int *nlambda, int *flags_state,
 +                          int *flags_eks,int *flags_enh, int *flags_dfh,
++                          int *nED,
 +                          FILE *list)
 +{
 +    bool_t res=0;
 +    int  magic;
 +    int  idum=0;
 +    int  i;
 +    char *fhost;
 +
 +    if (bRead)
 +    {
 +        magic = -1;
 +    }
 +    else
 +    {
 +        magic = CPT_MAGIC1;
 +    }
 +    res = xdr_int(xd,&magic);
 +    if (res == 0)
 +    {
 +        gmx_fatal(FARGS,"The checkpoint file is empty/corrupted, or maybe you are out of disk space?");
 +    }
 +    if (magic != CPT_MAGIC1)
 +    {
 +        gmx_fatal(FARGS,"Start of file magic number mismatch, checkpoint file has %d, should be %d\n"
 +                  "The checkpoint file is corrupted or not a checkpoint file",
 +                  magic,CPT_MAGIC1);
 +    }
 +    if (!bRead)
 +    {
 +        /* on write, record the host we are running on */
 +        snew(fhost,255);
 +#ifdef HAVE_UNISTD_H
 +        if (gethostname(fhost,255) != 0)
 +        {
 +            sprintf(fhost,"unknown");
 +        }
 +#else
 +        sprintf(fhost,"unknown");
 +#endif  
 +    }
 +    do_cpt_string_err(xd,bRead,"GROMACS version"           ,version,list);
 +    do_cpt_string_err(xd,bRead,"GROMACS build time"        ,btime,list);
 +    do_cpt_string_err(xd,bRead,"GROMACS build user"        ,buser,list);
 +    do_cpt_string_err(xd,bRead,"GROMACS build host"        ,bhost,list);
 +    do_cpt_string_err(xd,bRead,"generating program"        ,fprog,list);
 +    do_cpt_string_err(xd,bRead,"generation time"           ,ftime,list);
 +    *file_version = cpt_version;
 +    do_cpt_int_err(xd,"checkpoint file version",file_version,list);
 +    if (*file_version > cpt_version)
 +    {
 +        gmx_fatal(FARGS,"Attempting to read a checkpoint file of version %d with code of version %d\n",*file_version,cpt_version);
 +    }
 +    if (*file_version >= 13)
 +    {
 +        do_cpt_int_err(xd,"GROMACS double precision",double_prec,list);
 +    }
 +    else
 +    {
 +        *double_prec = -1;
 +    }
 +    if (*file_version >= 12)
 +    {
 +        do_cpt_string_err(xd,bRead,"generating host"           ,&fhost,list);
 +        if (list == NULL)
 +        {
 +            sfree(fhost);
 +        }
 +    }
 +    do_cpt_int_err(xd,"#atoms"            ,natoms     ,list);
 +    do_cpt_int_err(xd,"#T-coupling groups",ngtc       ,list);
 +    if (*file_version >= 10) 
 +    {
 +        do_cpt_int_err(xd,"#Nose-Hoover T-chains",nhchainlength,list);
 +    }
 +    else
 +    {
 +        *nhchainlength = 1;
 +    }
 +    if (*file_version >= 11)
 +    {
 +        do_cpt_int_err(xd,"#Nose-Hoover T-chains for barostat ",nnhpres,list);
 +    }
 +    else
 +    {
 +        *nnhpres = 0;
 +    }
 +    if (*file_version >= 14)
 +    {
 +        do_cpt_int_err(xd,"# of total lambda states ",nlambda,list);
 +    }
 +    else
 +    {
 +        *nlambda = 0;
 +    }
 +    do_cpt_int_err(xd,"integrator"        ,eIntegrator,list);
 +      if (*file_version >= 3)
 +      {
 +              do_cpt_int_err(xd,"simulation part #", simulation_part,list);
 +      }
 +      else
 +      {
 +              *simulation_part = 1;
 +      }
 +    if (*file_version >= 5)
 +    {
 +        do_cpt_step_err(xd,"step"         ,step       ,list);
 +    }
 +    else
 +    {
 +        do_cpt_int_err(xd,"step"          ,&idum      ,list);
 +        *step = idum;
 +    }
 +    do_cpt_double_err(xd,"t"              ,t          ,list);
 +    do_cpt_int_err(xd,"#PP-nodes"         ,nnodes     ,list);
 +    idum = 1;
 +    /* dd_nc may be NULL (no domain decomposition info wanted) */
 +    do_cpt_int_err(xd,"dd_nc[x]",dd_nc ? &(dd_nc[0]) : &idum,list);
 +    do_cpt_int_err(xd,"dd_nc[y]",dd_nc ? &(dd_nc[1]) : &idum,list);
 +    do_cpt_int_err(xd,"dd_nc[z]",dd_nc ? &(dd_nc[2]) : &idum,list);
 +    do_cpt_int_err(xd,"#PME-only nodes",npme,list);
 +    do_cpt_int_err(xd,"state flags",flags_state,list);
 +      if (*file_version >= 4)
 +    {
 +        do_cpt_int_err(xd,"ekin data flags",flags_eks,list);
 +        do_cpt_int_err(xd,"energy history flags",flags_enh,list);
 +    }
 +    else
 +    {
 +        /* before v4 the energy-history flags lived in the high bits
 +         * of the state flags; split them out here */
 +        *flags_eks  = 0;
 +        *flags_enh   = (*flags_state >> (estORIRE_DTAV+1));
 +        *flags_state = (*flags_state & ~((1<<(estORIRE_DTAV+1)) |
 +                                         (1<<(estORIRE_DTAV+2)) |
 +                                         (1<<(estORIRE_DTAV+3))));
 +    }
 +      if (*file_version >= 14)
 +    {
 +        do_cpt_int_err(xd,"df history flags",flags_dfh,list);
 +    } else {
 +        *flags_dfh = 0;
 +    }
++
++    if (*file_version >= 15)
++    {
++        do_cpt_int_err(xd,"ED data sets",nED,list);
++    }
++    else
++    {
++        *nED = 0;
++    }
 +}
 +
 +static int do_cpt_footer(XDR *xd,gmx_bool bRead,int file_version)
 +{
 +    bool_t res=0;
 +    int  magic;
 +    
 +    if (file_version >= 2)
 +    {
 +        magic = CPT_MAGIC2;
 +        res = xdr_int(xd,&magic);
 +        if (res == 0)
 +        {
 +            cp_error();
 +        }
 +        if (magic != CPT_MAGIC2)
 +        {
 +            return -1;
 +        }
 +    }
 +
 +    return 0;
 +}
 +
 +/* Read or write the parts of t_state selected by fflags.
 + * sflags (the state's own flag set) decides per entry whether read
 + * data is kept or discarded.  bReadRNG=FALSE skips storing the RNG
 + * state (it is still consumed from the stream).  The loop stops at
 + * the first failing entry; returns 0 on success or that entry's
 + * nonzero status.
 + */
 +static int do_cpt_state(XDR *xd,gmx_bool bRead,
 +                        int fflags,t_state *state,
 +                        gmx_bool bReadRNG,FILE *list)
 +{
 +    int  sflags;
 +    int  **rng_p,**rngi_p;
 +    int  i;
 +    int  ret;
 +    int  nnht,nnhtp;
 +
 +    ret = 0;
 +    
 +    nnht = state->nhchainlength*state->ngtc;
 +    nnhtp = state->nhchainlength*state->nnhpres;
 +
 +    if (bReadRNG)
 +    {
 +        rng_p  = (int **)&state->ld_rng;
 +        rngi_p = &state->ld_rngi;
 +    }
 +    else
 +    {
 +        /* Do not read the RNG data */
 +        rng_p  = NULL;
 +        rngi_p = NULL;
 +    }
 +    /* We want the MC_RNG the same across all the notes for now -- lambda MC is global */
 +
 +    sflags = state->flags;
 +    for(i=0; (i<estNR && ret == 0); i++)
 +    {
 +        if (fflags & (1<<i))
 +        {
 +            switch (i)
 +            {
 +            case estLAMBDA:  ret = do_cpte_reals(xd,cptpEST,i,sflags,efptNR,&(state->lambda),list); break;
 +            case estFEPSTATE: ret = do_cpte_int (xd,cptpEST,i,sflags,&state->fep_state,list); break;
 +            case estBOX:     ret = do_cpte_matrix(xd,cptpEST,i,sflags,state->box,list); break;
 +            case estBOX_REL: ret = do_cpte_matrix(xd,cptpEST,i,sflags,state->box_rel,list); break;
 +            case estBOXV:    ret = do_cpte_matrix(xd,cptpEST,i,sflags,state->boxv,list); break;
 +            case estPRES_PREV: ret = do_cpte_matrix(xd,cptpEST,i,sflags,state->pres_prev,list); break;
 +            case estSVIR_PREV:  ret = do_cpte_matrix(xd,cptpEST,i,sflags,state->svir_prev,list); break;
 +            case estFVIR_PREV:  ret = do_cpte_matrix(xd,cptpEST,i,sflags,state->fvir_prev,list); break;
 +            case estNH_XI:   ret = do_cpte_doubles(xd,cptpEST,i,sflags,nnht,&state->nosehoover_xi,list); break;
 +            case estNH_VXI:  ret = do_cpte_doubles(xd,cptpEST,i,sflags,nnht,&state->nosehoover_vxi,list); break;
 +            case estNHPRES_XI:   ret = do_cpte_doubles(xd,cptpEST,i,sflags,nnhtp,&state->nhpres_xi,list); break;
 +            case estNHPRES_VXI:  ret = do_cpte_doubles(xd,cptpEST,i,sflags,nnhtp,&state->nhpres_vxi,list); break;
 +            case estTC_INT:  ret = do_cpte_doubles(xd,cptpEST,i,sflags,state->ngtc,&state->therm_integral,list); break;
 +            case estVETA:    ret = do_cpte_real(xd,cptpEST,i,sflags,&state->veta,list); break;
 +            case estVOL0:    ret = do_cpte_real(xd,cptpEST,i,sflags,&state->vol0,list); break;
 +            case estX:       ret = do_cpte_rvecs(xd,cptpEST,i,sflags,state->natoms,&state->x,list); break;
 +            case estV:       ret = do_cpte_rvecs(xd,cptpEST,i,sflags,state->natoms,&state->v,list); break;
 +            case estSDX:     ret = do_cpte_rvecs(xd,cptpEST,i,sflags,state->natoms,&state->sd_X,list); break;
 +            case estLD_RNG:  ret = do_cpte_ints(xd,cptpEST,i,sflags,state->nrng,rng_p,list); break;
 +            case estLD_RNGI: ret = do_cpte_ints(xd,cptpEST,i,sflags,state->nrngi,rngi_p,list); break;
 +            case estMC_RNG:  ret = do_cpte_ints(xd,cptpEST,i,sflags,state->nmcrng,(int **)&state->mc_rng,list); break;
 +            case estMC_RNGI: ret = do_cpte_ints(xd,cptpEST,i,sflags,1,&state->mc_rngi,list); break;
 +            case estDISRE_INITF:  ret = do_cpte_real (xd,cptpEST,i,sflags,&state->hist.disre_initf,list); break;
 +            case estDISRE_RM3TAV: ret = do_cpte_reals(xd,cptpEST,i,sflags,state->hist.ndisrepairs,&state->hist.disre_rm3tav,list); break;
 +            case estORIRE_INITF:  ret = do_cpte_real (xd,cptpEST,i,sflags,&state->hist.orire_initf,list); break;
 +            case estORIRE_DTAV:   ret = do_cpte_reals(xd,cptpEST,i,sflags,state->hist.norire_Dtav,&state->hist.orire_Dtav,list); break;
 +            default:
 +                gmx_fatal(FARGS,"Unknown state entry %d\n"
 +                          "You are probably reading a new checkpoint file with old code",i);
 +            }
 +        }
 +    }
 +    
 +    return ret;
 +}
 +
 +static int do_cpt_ekinstate(XDR *xd,gmx_bool bRead,
 +                            int fflags,ekinstate_t *ekins,
 +                            FILE *list)
 +{
 +    int  i;
 +    int  ret;
 +
 +    ret = 0;
 +
 +    for(i=0; (i<eeksNR && ret == 0); i++)
 +    {
 +        if (fflags & (1<<i))
 +        {
 +            switch (i)
 +            {
 +                
 +                      case eeksEKIN_N:     ret = do_cpte_int(xd,cptpEEKS,i,fflags,&ekins->ekin_n,list); break;
 +                      case eeksEKINH :     ret = do_cpte_matrices(xd,cptpEEKS,i,fflags,ekins->ekin_n,&ekins->ekinh,list); break;
 +                      case eeksEKINF:      ret = do_cpte_matrices(xd,cptpEEKS,i,fflags,ekins->ekin_n,&ekins->ekinf,list); break;
 +                      case eeksEKINO:      ret = do_cpte_matrices(xd,cptpEEKS,i,fflags,ekins->ekin_n,&ekins->ekinh_old,list); break;
 +            case eeksEKINTOTAL:  ret = do_cpte_matrix(xd,cptpEEKS,i,fflags,ekins->ekin_total,list); break;
 +            case eeksEKINSCALEF: ret = do_cpte_doubles(xd,cptpEEKS,i,fflags,ekins->ekin_n,&ekins->ekinscalef_nhc,list); break;
 +            case eeksVSCALE:     ret = do_cpte_doubles(xd,1,cptpEEKS,fflags,ekins->ekin_n,&ekins->vscale_nhc,list); break;
 +            case eeksEKINSCALEH: ret = do_cpte_doubles(xd,1,cptpEEKS,fflags,ekins->ekin_n,&ekins->ekinscaleh_nhc,list); break;
 +                      case eeksDEKINDL :   ret = do_cpte_real(xd,1,cptpEEKS,fflags,&ekins->dekindl,list); break;
 +            case eeksMVCOS:      ret = do_cpte_real(xd,1,cptpEEKS,fflags,&ekins->mvcos,list); break;
 +            default:
 +                gmx_fatal(FARGS,"Unknown ekin data state entry %d\n"
 +                          "You are probably reading a new checkpoint file with old code",i);
 +            }
 +        }
 +    }
 +    
 +    return ret;
 +}
 +
 +
 +/* Read or write the energy history entries selected by fflags.
 + * On read the history is first reset and the delta-H collection is
 + * allocated if its flag is present.  After the main loop, entries
 + * missing from old file formats are reconstructed from the ones that
 + * are present (sum -> sum_sim, nsum -> nsteps).  Returns 0 on
 + * success, or the first nonzero helper status.
 + */
 +static int do_cpt_enerhist(XDR *xd,gmx_bool bRead,
 +                           int fflags,energyhistory_t *enerhist,
 +                           FILE *list)
 +{
 +    int  i;
 +    int  j;
 +    int  ret;
 +
 +    ret = 0;
 +
 +    if (bRead)
 +    {
 +        enerhist->nsteps     = 0;
 +        enerhist->nsum       = 0;
 +        enerhist->nsteps_sim = 0;
 +        enerhist->nsum_sim   = 0;
 +        enerhist->dht        = NULL;
 +
 +        if (fflags & (1<< eenhENERGY_DELTA_H_NN) )
 +        {
 +            snew(enerhist->dht,1);
 +            enerhist->dht->ndh = NULL;
 +            enerhist->dht->dh = NULL;
 +            enerhist->dht->start_lambda_set=FALSE;
 +        }
 +    }
 +
 +    for(i=0; (i<eenhNR && ret == 0); i++)
 +    {
 +        if (fflags & (1<<i))
 +        {
 +            switch (i)
 +            {
 +                      case eenhENERGY_N:     ret = do_cpte_int(xd,cptpEENH,i,fflags,&enerhist->nener,list); break;
 +                      case eenhENERGY_AVER:  ret = do_cpte_doubles(xd,cptpEENH,i,fflags,enerhist->nener,&enerhist->ener_ave,list); break;
 +                      case eenhENERGY_SUM:   ret = do_cpte_doubles(xd,cptpEENH,i,fflags,enerhist->nener,&enerhist->ener_sum,list); break;
 +            case eenhENERGY_NSUM:  do_cpt_step_err(xd,eenh_names[i],&enerhist->nsum,list); break;
 +            case eenhENERGY_SUM_SIM: ret = do_cpte_doubles(xd,cptpEENH,i,fflags,enerhist->nener,&enerhist->ener_sum_sim,list); break;
 +            case eenhENERGY_NSUM_SIM:   do_cpt_step_err(xd,eenh_names[i],&enerhist->nsum_sim,list); break;
 +            case eenhENERGY_NSTEPS:     do_cpt_step_err(xd,eenh_names[i],&enerhist->nsteps,list); break;
 +            case eenhENERGY_NSTEPS_SIM: do_cpt_step_err(xd,eenh_names[i],&enerhist->nsteps_sim,list); break;
 +            case eenhENERGY_DELTA_H_NN: do_cpt_int_err(xd,eenh_names[i], &(enerhist->dht->nndh), list);
 +                if (bRead) /* now allocate memory for it */
 +                {
 +                    snew(enerhist->dht->dh, enerhist->dht->nndh);
 +                    snew(enerhist->dht->ndh, enerhist->dht->nndh);
 +                    for(j=0;j<enerhist->dht->nndh;j++)
 +                    {
 +                        enerhist->dht->ndh[j] = 0;
 +                        enerhist->dht->dh[j] = NULL;
 +                    }
 +                }
 +                break;
 +            case eenhENERGY_DELTA_H_LIST:
 +                for(j=0;j<enerhist->dht->nndh;j++)
 +                {
 +                    ret=do_cpte_n_reals(xd, cptpEENH, i, fflags, &enerhist->dht->ndh[j], &(enerhist->dht->dh[j]), list);
 +                }
 +                break;
 +            case eenhENERGY_DELTA_H_STARTTIME:
 +                ret=do_cpte_double(xd, cptpEENH, i, fflags, &(enerhist->dht->start_time), list); break;
 +            case eenhENERGY_DELTA_H_STARTLAMBDA:
 +                ret=do_cpte_double(xd, cptpEENH, i, fflags, &(enerhist->dht->start_lambda), list); break;
 +            default:
 +                gmx_fatal(FARGS,"Unknown energy history entry %d\n"
 +                          "You are probably reading a new checkpoint file with old code",i);
 +            }
 +        }
 +    }
 +
 +    if ((fflags & (1<<eenhENERGY_SUM)) && !(fflags & (1<<eenhENERGY_SUM_SIM)))
 +    {
 +        /* Assume we have an old file format and copy sum to sum_sim */
 +        srenew(enerhist->ener_sum_sim,enerhist->nener);
 +        for(i=0; i<enerhist->nener; i++)
 +        {
 +            enerhist->ener_sum_sim[i] = enerhist->ener_sum[i];
 +        }
 +        fflags |= (1<<eenhENERGY_SUM_SIM);
 +    }
 +    
 +    if ( (fflags & (1<<eenhENERGY_NSUM)) &&
 +        !(fflags & (1<<eenhENERGY_NSTEPS)))
 +    {
 +        /* Assume we have an old file format and copy nsum to nsteps */
 +        enerhist->nsteps = enerhist->nsum;
 +        fflags |= (1<<eenhENERGY_NSTEPS);
 +    }
 +    if ( (fflags & (1<<eenhENERGY_NSUM_SIM)) &&
 +        !(fflags & (1<<eenhENERGY_NSTEPS_SIM)))
 +    {
 +        /* Assume we have an old file format and copy nsum to nsteps */
 +        enerhist->nsteps_sim = enerhist->nsum_sim;
 +        fflags |= (1<<eenhENERGY_NSTEPS_SIM);
 +    }
 +
 +    return ret;
 +}
 +
 +/* Read or write the free-energy (expanded ensemble) history entries
 + * selected by fflags.  All arrays are sized by dfhist->nlambda, which
 + * must already be set on read.  Returns 0 on success, or the first
 + * nonzero helper status.
 + */
 +static int do_cpt_df_hist(XDR *xd,gmx_bool bRead,int fflags,df_history_t *dfhist,FILE *list)
 +{
 +    int  i,nlambda;
 +    int  ret;
 +
 +    nlambda = dfhist->nlambda;
 +    ret = 0;
 +
 +    for(i=0; (i<edfhNR && ret == 0); i++)
 +    {
 +        if (fflags & (1<<i))
 +        {
 +            switch (i)
 +            {
 +            case edfhBEQUIL:       ret = do_cpte_int(xd,cptpEDFH,i,fflags,&dfhist->bEquil,list); break;
 +            case edfhNATLAMBDA:    ret = do_cpte_ints(xd,cptpEDFH,i,fflags,nlambda,&dfhist->n_at_lam,list); break;
 +            case edfhWLHISTO:      ret = do_cpte_reals(xd,cptpEDFH,i,fflags,nlambda,&dfhist->wl_histo,list); break;
 +            case edfhWLDELTA:      ret = do_cpte_real(xd,cptpEDFH,i,fflags,&dfhist->wl_delta,list); break;
 +            case edfhSUMWEIGHTS:   ret = do_cpte_reals(xd,cptpEDFH,i,fflags,nlambda,&dfhist->sum_weights,list); break;
 +            case edfhSUMDG:        ret = do_cpte_reals(xd,cptpEDFH,i,fflags,nlambda,&dfhist->sum_dg,list); break;
 +            case edfhSUMMINVAR:    ret = do_cpte_reals(xd,cptpEDFH,i,fflags,nlambda,&dfhist->sum_minvar,list); break;
 +            case edfhSUMVAR:       ret = do_cpte_reals(xd,cptpEDFH,i,fflags,nlambda,&dfhist->sum_variance,list); break;
 +            case edfhACCUMP:       ret = do_cpte_nmatrix(xd,cptpEDFH,i,fflags,nlambda,dfhist->accum_p,list); break;
 +            case edfhACCUMM:       ret = do_cpte_nmatrix(xd,cptpEDFH,i,fflags,nlambda,dfhist->accum_m,list); break;
 +            case edfhACCUMP2:      ret = do_cpte_nmatrix(xd,cptpEDFH,i,fflags,nlambda,dfhist->accum_p2,list); break;
 +            case edfhACCUMM2:      ret = do_cpte_nmatrix(xd,cptpEDFH,i,fflags,nlambda,dfhist->accum_m2,list); break;
 +            case edfhTIJ:          ret = do_cpte_nmatrix(xd,cptpEDFH,i,fflags,nlambda,dfhist->Tij,list); break;
 +            case edfhTIJEMP:       ret = do_cpte_nmatrix(xd,cptpEDFH,i,fflags,nlambda,dfhist->Tij_empirical,list); break;
 +
 +            default:
 +                gmx_fatal(FARGS,"Unknown df history entry %d\n"
 +                          "You are probably reading a new checkpoint file with old code",i);
 +            }
 +        }
 +    }
 +
 +    return ret;
 +}
 +
++
++/* Read or write the essential-dynamics (ED) sampling state: the last
++ * whole configuration of the reference (SREF) and average (SAV)
++ * structure for each of the nED data sets.  On read the arrays are
++ * allocated here (init_edsam has not run yet) and EDstate->bFromCpt
++ * is set so later code knows the data came from a checkpoint.
++ * Returns 0 (the element helpers are fatal on error).
++ * NOTE(review): locals j and buf's STRLEN size are generous; j is unused.
++ */
++static int do_cpt_EDstate(XDR *xd,gmx_bool bRead,
++        edsamstate_t *EDstate, FILE *list)
++{
++    int i,j;
++    int ret=0;
++    char buf[STRLEN];
++
++
++    EDstate->bFromCpt = bRead;
++
++    if (EDstate->nED <= 0)
++    {
++        return ret;
++    }
++
++    /* When reading, init_edsam has not been called yet,
++     * so we have to allocate memory first. */
++    if (bRead)
++    {
++        snew(EDstate->nref    , EDstate->nED);
++        snew(EDstate->old_sref, EDstate->nED);
++        snew(EDstate->nav     , EDstate->nED);
++        snew(EDstate->old_sav , EDstate->nED);
++    }
++
++    /* Read/write the last whole conformation of SREF and SAV for each ED dataset (usually only one) */
++    for (i=0; i< EDstate->nED; i++)
++    {
++        /* Reference structure SREF */
++        sprintf(buf, "ED%d # of atoms in reference structure", i+1);
++        do_cpt_int_err(xd, buf, &EDstate->nref[i],list);
++        sprintf(buf, "ED%d x_ref", i+1);
++        if (bRead)
++        {
++            snew(EDstate->old_sref[i], EDstate->nref[i]);
++            do_cpt_n_rvecs_err(xd, buf, EDstate->nref[i], EDstate->old_sref[i], list);
++        }
++        else
++        {
++            /* on write the coordinates live behind the *_p pointers */
++            do_cpt_n_rvecs_err(xd, buf, EDstate->nref[i], EDstate->old_sref_p[i], list);
++        }
++
++        /* Average structure SAV */
++        sprintf(buf, "ED%d # of atoms in average structure", i+1);
++        do_cpt_int_err(xd, buf, &EDstate->nav[i] ,list);
++        sprintf(buf, "ED%d x_av", i+1);
++        if (bRead)
++        {
++            snew(EDstate->old_sav[i], EDstate->nav[i]);
++            do_cpt_n_rvecs_err(xd, buf, EDstate->nav[i], EDstate->old_sav[i], list);
++        }
++        else
++        {
++            do_cpt_n_rvecs_err(xd, buf, EDstate->nav[i], EDstate->old_sav_p[i], list);
++        }
++    }
++
++    return ret;
++}
++
++
 +static int do_cpt_files(XDR *xd, gmx_bool bRead, 
 +                        gmx_file_position_t **p_outputfiles, int *nfiles, 
 +                        FILE *list, int file_version)
 +{
 +    int    i,j;
 +    gmx_off_t  offset;
 +    gmx_off_t  mask = 0xFFFFFFFFL;
 +    int    offset_high,offset_low;
 +    char   *buf;
 +    gmx_file_position_t *outputfiles;
 +
 +    if (do_cpt_int(xd,"number of output files",nfiles,list) != 0)
 +    {
 +        return -1;
 +    }
 +
 +    if(bRead)
 +    {
 +        snew(*p_outputfiles,*nfiles);
 +    }
 +
 +    outputfiles = *p_outputfiles;
 +
 +    for(i=0;i<*nfiles;i++)
 +    {
 +        /* 64-bit XDR numbers are not portable, so it is stored as separate high/low fractions */
 +        if(bRead)
 +        {
 +            do_cpt_string_err(xd,bRead,"output filename",&buf,list);
 +            strncpy(outputfiles[i].filename,buf,CPTSTRLEN-1);
 +            if(list==NULL)
 +            {
 +                sfree(buf);                   
 +            }
 +
 +            if (do_cpt_int(xd,"file_offset_high",&offset_high,list) != 0)
 +            {
 +                return -1;
 +            }
 +            if (do_cpt_int(xd,"file_offset_low",&offset_low,list) != 0)
 +            {
 +                return -1;
 +            }
 +#if (SIZEOF_GMX_OFF_T > 4)
 +            outputfiles[i].offset = ( ((gmx_off_t) offset_high) << 32 ) | ( (gmx_off_t) offset_low & mask );
 +#else
 +            outputfiles[i].offset = offset_low;
 +#endif
 +        }
 +        else
 +        {
 +            buf = outputfiles[i].filename;
 +            do_cpt_string_err(xd,bRead,"output filename",&buf,list);
 +            /* writing */
 +            offset      = outputfiles[i].offset;
 +            if (offset == -1)
 +            {
 +                offset_low  = -1;
 +                offset_high = -1;
 +            }
 +            else
 +            {
 +#if (SIZEOF_GMX_OFF_T > 4)
 +                offset_low  = (int) (offset & mask);
 +                offset_high = (int) ((offset >> 32) & mask);
 +#else
 +                offset_low  = offset;
 +                offset_high = 0;
 +#endif
 +            }
 +            if (do_cpt_int(xd,"file_offset_high",&offset_high,list) != 0)
 +            {
 +                return -1;
 +            }
 +            if (do_cpt_int(xd,"file_offset_low",&offset_low,list) != 0)
 +            {
 +                return -1;
 +            }
 +        }
 +        if (file_version >= 8)
 +        {
 +            if (do_cpt_int(xd,"file_checksum_size",&(outputfiles[i].chksum_size),
 +                           list) != 0)
 +            {
 +                return -1;
 +            }
 +            if (do_cpt_u_chars(xd,"file_checksum",16,outputfiles[i].chksum,list) != 0)
 +            {
 +                return -1;
 +            }
 +        } 
 +        else 
 +        {
 +            outputfiles[i].chksum_size = -1;
 +        }
 +    }
 +    return 0;
 +}
 +
 +
 +void write_checkpoint(const char *fn,gmx_bool bNumberAndKeep,
 +                      FILE *fplog,t_commrec *cr,
 +                      int eIntegrator,int simulation_part,
 +                      gmx_bool bExpanded, int elamstats,
 +                      gmx_large_int_t step,double t,t_state *state)
 +{
 +    t_fileio *fp;
 +    int  file_version;
 +    char *version;
 +    char *btime;
 +    char *buser;
 +    char *bhost;
 +    int  double_prec;
 +    char *fprog;
 +    char *fntemp; /* the temporary checkpoint file name */
 +    time_t now;
 +    char timebuf[STRLEN];
 +    int  nppnodes,npmenodes,flag_64bit;
 +    char buf[1024],suffix[5+STEPSTRSIZE],sbuf[STEPSTRSIZE];
 +    gmx_file_position_t *outputfiles;
 +    int  noutputfiles;
 +    char *ftime;
 +    int  flags_eks,flags_enh,flags_dfh,i;
 +    t_fileio *ret;
 +              
 +    if (PAR(cr))
 +    {
 +        if (DOMAINDECOMP(cr))
 +        {
 +            nppnodes  = cr->dd->nnodes;
 +            npmenodes = cr->npmenodes;
 +        }
 +        else
 +        {
 +            nppnodes  = cr->nnodes;
 +            npmenodes = 0;
 +        }
 +    }
 +    else
 +    {
 +        nppnodes  = 1;
 +        npmenodes = 0;
 +    }
 +
 +    /* make the new temporary filename */
 +    snew(fntemp, strlen(fn)+5+STEPSTRSIZE);
 +    strcpy(fntemp,fn);
 +    fntemp[strlen(fn) - strlen(ftp2ext(fn2ftp(fn))) - 1] = '\0';
 +    sprintf(suffix,"_%s%s","step",gmx_step_str(step,sbuf));
 +    strcat(fntemp,suffix);
 +    strcat(fntemp,fn+strlen(fn) - strlen(ftp2ext(fn2ftp(fn))) - 1);
 +   
 +    time(&now);
 +    gmx_ctime_r(&now,timebuf,STRLEN);
 +
 +    if (fplog)
 +    { 
 +        fprintf(fplog,"Writing checkpoint, step %s at %s\n\n",
 +                gmx_step_str(step,buf),timebuf);
 +    }
 +    
 +    /* Get offsets for open files */
 +    gmx_fio_get_output_file_positions(&outputfiles, &noutputfiles);
 +
 +    fp = gmx_fio_open(fntemp,"w");
 +      
 +    if (state->ekinstate.bUpToDate)
 +    {
 +        flags_eks =
 +            ((1<<eeksEKIN_N) | (1<<eeksEKINH) | (1<<eeksEKINF) | 
 +             (1<<eeksEKINO) | (1<<eeksEKINSCALEF) | (1<<eeksEKINSCALEH) | 
 +             (1<<eeksVSCALE) | (1<<eeksDEKINDL) | (1<<eeksMVCOS));
 +    }
 +    else
 +    {
 +        flags_eks = 0;
 +    }
 +
 +    flags_enh = 0;
 +    if (state->enerhist.nsum > 0 || state->enerhist.nsum_sim > 0)
 +    {
 +        flags_enh |= (1<<eenhENERGY_N);
 +        if (state->enerhist.nsum > 0)
 +        {
 +            flags_enh |= ((1<<eenhENERGY_AVER) | (1<<eenhENERGY_SUM) |
 +                          (1<<eenhENERGY_NSTEPS) | (1<<eenhENERGY_NSUM));
 +        }
 +        if (state->enerhist.nsum_sim > 0)
 +        {
 +            flags_enh |= ((1<<eenhENERGY_SUM_SIM) | (1<<eenhENERGY_NSTEPS_SIM) |
 +                          (1<<eenhENERGY_NSUM_SIM));
 +        }
 +        if (state->enerhist.dht)
 +        {
 +            flags_enh |= ( (1<< eenhENERGY_DELTA_H_NN) |
 +                           (1<< eenhENERGY_DELTA_H_LIST) | 
 +                           (1<< eenhENERGY_DELTA_H_STARTTIME) |
 +                           (1<< eenhENERGY_DELTA_H_STARTLAMBDA) );
 +        }
 +    }
 +
 +    if (bExpanded)
 +    {
 +        flags_dfh = ((1<<edfhBEQUIL) | (1<<edfhNATLAMBDA) | (1<<edfhSUMWEIGHTS) |  (1<<edfhSUMDG)  |
 +                     (1<<edfhTIJ) | (1<<edfhTIJEMP));
 +        if (EWL(elamstats))
 +        {
 +            flags_dfh |= ((1<<edfhWLDELTA) | (1<<edfhWLHISTO));
 +        }
 +        if ((elamstats == elamstatsMINVAR) || (elamstats == elamstatsBARKER) || (elamstats == elamstatsMETROPOLIS))
 +        {
 +            flags_dfh |= ((1<<edfhACCUMP) | (1<<edfhACCUMM) | (1<<edfhACCUMP2) | (1<<edfhACCUMM2)
 +                          | (1<<edfhSUMMINVAR) | (1<<edfhSUMVAR));
 +        }
 +    } else {
 +        flags_dfh = 0;
 +    }
 +    
 +    /* We can check many more things now (CPU, acceleration, etc), but
 +     * it is highly unlikely to have two separate builds with exactly
 +     * the same version, user, time, and build host!
 +     */
 +
 +    version = gmx_strdup(VERSION);
 +    btime   = gmx_strdup(BUILD_TIME);
 +    buser   = gmx_strdup(BUILD_USER);
 +    bhost   = gmx_strdup(BUILD_HOST);
 +
 +    double_prec = GMX_CPT_BUILD_DP;
 +    fprog   = gmx_strdup(Program());
 +
 +    ftime   = &(timebuf[0]);
 +    
 +    do_cpt_header(gmx_fio_getxdr(fp),FALSE,&file_version,
 +                  &version,&btime,&buser,&bhost,&double_prec,&fprog,&ftime,
 +                  &eIntegrator,&simulation_part,&step,&t,&nppnodes,
 +                  DOMAINDECOMP(cr) ? cr->dd->nc : NULL,&npmenodes,
 +                  &state->natoms,&state->ngtc,&state->nnhpres,
 +                  &state->nhchainlength,&(state->dfhist.nlambda),&state->flags,&flags_eks,&flags_enh,&flags_dfh,
++                  &state->edsamstate.nED,
 +                  NULL);
 +    
 +    sfree(version);
 +    sfree(btime);
 +    sfree(buser);
 +    sfree(bhost);
 +    sfree(fprog);
 +
 +    if((do_cpt_state(gmx_fio_getxdr(fp),FALSE,state->flags,state,TRUE,NULL) < 0)        ||
 +       (do_cpt_ekinstate(gmx_fio_getxdr(fp),FALSE,flags_eks,&state->ekinstate,NULL) < 0)||
 +       (do_cpt_enerhist(gmx_fio_getxdr(fp),FALSE,flags_enh,&state->enerhist,NULL) < 0)  ||
 +       (do_cpt_df_hist(gmx_fio_getxdr(fp),FALSE,flags_dfh,&state->dfhist,NULL) < 0)  ||
++       (do_cpt_EDstate(gmx_fio_getxdr(fp),FALSE,&state->edsamstate,NULL) < 0)      ||
 +       (do_cpt_files(gmx_fio_getxdr(fp),FALSE,&outputfiles,&noutputfiles,NULL,
 +                     file_version) < 0))
 +    {
 +        gmx_file("Cannot read/write checkpoint; corrupt file, or maybe you are out of disk space?");
 +    }
 +
 +    do_cpt_footer(gmx_fio_getxdr(fp),FALSE,file_version);
 +
 +    /* we really, REALLY, want to make sure to physically write the checkpoint, 
 +       and all the files it depends on, out to disk. Because we've
 +       opened the checkpoint with gmx_fio_open(), it's in our list
 +       of open files.  */
 +    ret=gmx_fio_all_output_fsync();
 +
 +    if (ret)
 +    {
 +        char buf[STRLEN];
 +        sprintf(buf,
 +                "Cannot fsync '%s'; maybe you are out of disk space?",
 +                gmx_fio_getname(ret));
 +
 +        if (getenv(GMX_IGNORE_FSYNC_FAILURE_ENV)==NULL)
 +        {
 +            gmx_file(buf);
 +        }
 +        else
 +        {
 +            gmx_warning(buf);
 +        }
 +    }
 +
 +    if( gmx_fio_close(fp) != 0)
 +    {
 +        gmx_file("Cannot read/write checkpoint; corrupt file, or maybe you are out of disk space?");
 +    }
 +
 +    /* we don't move the checkpoint if the user specified they didn't want it,
 +       or if the fsyncs failed */
 +    if (!bNumberAndKeep && !ret)
 +    {
 +        if (gmx_fexist(fn))
 +        {
 +            /* Rename the previous checkpoint file */
 +            strcpy(buf,fn);
 +            buf[strlen(fn) - strlen(ftp2ext(fn2ftp(fn))) - 1] = '\0';
 +            strcat(buf,"_prev");
 +            strcat(buf,fn+strlen(fn) - strlen(ftp2ext(fn2ftp(fn))) - 1);
 +#ifndef GMX_FAHCORE
 +            /* we copy here so that if something goes wrong between now and
 +             * the rename below, there's always a state.cpt.
 +             * If renames are atomic (such as in POSIX systems),
 +             * this copying should be unneccesary.
 +             */
 +            gmx_file_copy(fn, buf, FALSE);
 +            /* We don't really care if this fails: 
 +             * there's already a new checkpoint.
 +             */
 +#else
 +            gmx_file_rename(fn, buf);
 +#endif
 +        }
 +        if (gmx_file_rename(fntemp, fn) != 0)
 +        {
 +            gmx_file("Cannot rename checkpoint file; maybe you are out of disk space?");
 +        }
 +    }
 +
 +    sfree(outputfiles);
 +    sfree(fntemp);
 +
 +#ifdef GMX_FAHCORE
 +    /*code for alternate checkpointing scheme.  moved from top of loop over 
 +      steps */
 +    fcRequestCheckPoint();
 +    if ( fcCheckPointParallel( cr->nodeid, NULL,0) == 0 ) {
 +        gmx_fatal( 3,__FILE__,__LINE__, "Checkpoint error on step %d\n", step );
 +    }
 +#endif /* end GMX_FAHCORE block */
 +}
 +
 +static void print_flag_mismatch(FILE *fplog,int sflags,int fflags)
 +{
 +    int i;
 +    
 +    fprintf(fplog,"\nState entry mismatch between the simulation and the checkpoint file\n");
 +    fprintf(fplog,"Entries which are not present in the checkpoint file will not be updated\n");
 +    fprintf(fplog,"  %24s    %11s    %11s\n","","simulation","checkpoint");
 +    for(i=0; i<estNR; i++)
 +    {
 +        if ((sflags & (1<<i)) || (fflags & (1<<i)))
 +        {
 +            fprintf(fplog,"  %24s    %11s    %11s\n",
 +                    est_names[i],
 +                    (sflags & (1<<i)) ? "  present  " : "not present",
 +                    (fflags & (1<<i)) ? "  present  " : "not present");
 +        }
 +    }
 +}
 +
 +static void check_int(FILE *fplog,const char *type,int p,int f,gmx_bool *mm)
 +{
 +      FILE *fp = fplog ? fplog : stderr;
 +
 +    if (p != f)
 +    {
 +              fprintf(fp,"  %s mismatch,\n",type);
 +              fprintf(fp,"    current program: %d\n",p);
 +              fprintf(fp,"    checkpoint file: %d\n",f);
 +              fprintf(fp,"\n");
 +        *mm = TRUE;
 +    }
 +}
 +
 +static void check_string(FILE *fplog,const char *type,const char *p,
 +                         const char *f,gmx_bool *mm)
 +{
 +      FILE *fp = fplog ? fplog : stderr;
 +      
 +    if (strcmp(p,f) != 0)
 +    {
 +              fprintf(fp,"  %s mismatch,\n",type);
 +              fprintf(fp,"    current program: %s\n",p);
 +              fprintf(fp,"    checkpoint file: %s\n",f);
 +              fprintf(fp,"\n");
 +        *mm = TRUE;
 +    }
 +}
 +
 +static void check_match(FILE *fplog,
 +                        char *version,
 +                        char *btime,char *buser,char *bhost,int double_prec,
 +                        char *fprog,
 +                        t_commrec *cr,gmx_bool bPartDecomp,int npp_f,int npme_f,
 +                        ivec dd_nc,ivec dd_nc_f)
 +{
 +    int  npp;
 +    gmx_bool mm;
 +    
 +    mm = FALSE;
 +    
 +    check_string(fplog,"Version"      ,VERSION      ,version,&mm);
 +    check_string(fplog,"Build time"   ,BUILD_TIME   ,btime  ,&mm);
 +    check_string(fplog,"Build user"   ,BUILD_USER   ,buser  ,&mm);
 +    check_string(fplog,"Build host"   ,BUILD_HOST   ,bhost  ,&mm);
 +    check_int   (fplog,"Double prec." ,GMX_CPT_BUILD_DP,double_prec,&mm);
 +    check_string(fplog,"Program name" ,Program()    ,fprog  ,&mm);
 +    
 +    check_int   (fplog,"#nodes"       ,cr->nnodes   ,npp_f+npme_f ,&mm);
 +    if (bPartDecomp)
 +    {
 +        dd_nc[XX] = 1;
 +        dd_nc[YY] = 1;
 +        dd_nc[ZZ] = 1;
 +    }
 +    if (cr->nnodes > 1)
 +    {
 +        check_int (fplog,"#PME-nodes"  ,cr->npmenodes,npme_f     ,&mm);
 +
 +        npp = cr->nnodes;
 +        if (cr->npmenodes >= 0)
 +        {
 +            npp -= cr->npmenodes;
 +        }
 +        if (npp == npp_f)
 +        {
 +            check_int (fplog,"#DD-cells[x]",dd_nc[XX]    ,dd_nc_f[XX],&mm);
 +            check_int (fplog,"#DD-cells[y]",dd_nc[YY]    ,dd_nc_f[YY],&mm);
 +            check_int (fplog,"#DD-cells[z]",dd_nc[ZZ]    ,dd_nc_f[ZZ],&mm);
 +        }
 +    }
 +    
 +    if (mm)
 +    {
 +              fprintf(stderr,
 +                              "Gromacs binary or parallel settings not identical to previous run.\n"
 +                              "Continuation is exact, but is not guaranteed to be binary identical%s.\n\n",
 +                              fplog ? ",\n see the log file for details" : "");
 +              
 +        if (fplog)
 +        {
 +                      fprintf(fplog,
 +                                      "Gromacs binary or parallel settings not identical to previous run.\n"
 +                                      "Continuation is exact, but is not guaranteed to be binary identical.\n\n");
 +              }
 +    }
 +}
 +
 +static void read_checkpoint(const char *fn,FILE **pfplog,
 +                            t_commrec *cr,gmx_bool bPartDecomp,ivec dd_nc,
 +                            int eIntegrator, int *init_fep_state, gmx_large_int_t *step,double *t,
 +                            t_state *state,gmx_bool *bReadRNG,gmx_bool *bReadEkin,
 +                            int *simulation_part,
 +                            gmx_bool bAppendOutputFiles,gmx_bool bForceAppend)
 +{
 +    t_fileio *fp;
 +    int  i,j,rc;
 +    int  file_version;
 +    char *version,*btime,*buser,*bhost,*fprog,*ftime;
 +    int  double_prec;
 +      char filename[STRLEN],buf[STEPSTRSIZE];
 +    int  nppnodes,eIntegrator_f,nppnodes_f,npmenodes_f;
 +    ivec dd_nc_f;
 +    int  natoms,ngtc,nnhpres,nhchainlength,nlambda,fflags,flags_eks,flags_enh,flags_dfh;
 +    int  d;
 +    int  ret;
 +    gmx_file_position_t *outputfiles;
 +    int  nfiles;
 +    t_fileio *chksum_file;
 +    FILE* fplog = *pfplog;
 +    unsigned char digest[16];
 +#ifndef GMX_NATIVE_WINDOWS
 +    struct flock fl;  /* don't initialize here: the struct order is OS 
 +                         dependent! */
 +#endif
 +
 +    const char *int_warn=
 +              "WARNING: The checkpoint file was generated with integrator %s,\n"
 +              "         while the simulation uses integrator %s\n\n";
 +    const char *sd_note=
 +        "NOTE: The checkpoint file was for %d nodes doing SD or BD,\n"
 +        "      while the simulation uses %d SD or BD nodes,\n"
 +        "      continuation will be exact, except for the random state\n\n";
 +    
 +#ifndef GMX_NATIVE_WINDOWS
 +    fl.l_type=F_WRLCK;
 +    fl.l_whence=SEEK_SET;
 +    fl.l_start=0;
 +    fl.l_len=0;
 +    fl.l_pid=0;
 +#endif
 +
 +    if (PARTDECOMP(cr))
 +    {
 +        gmx_fatal(FARGS,
 +                  "read_checkpoint not (yet) supported with particle decomposition");
 +    }
 +    
 +    fp = gmx_fio_open(fn,"r");
 +    do_cpt_header(gmx_fio_getxdr(fp),TRUE,&file_version,
 +                  &version,&btime,&buser,&bhost,&double_prec,&fprog,&ftime,
 +                  &eIntegrator_f,simulation_part,step,t,
 +                  &nppnodes_f,dd_nc_f,&npmenodes_f,
 +                  &natoms,&ngtc,&nnhpres,&nhchainlength,&nlambda,
-                   &(state->dfhist.nlambda),&state->flags,&flags_eks,&flags_enh,&flags_dfh,NULL);
++                  &fflags,&flags_eks,&flags_enh,&flags_dfh,
++                  &state->edsamstate.nED,NULL);
 +
 +    if (bAppendOutputFiles &&
 +        file_version >= 13 && double_prec != GMX_CPT_BUILD_DP)
 +    {
 +        gmx_fatal(FARGS,"Output file appending requested, but the code and checkpoint file precision (single/double) don't match");
 +    }
 +    
 +    if (cr == NULL || MASTER(cr))
 +    {
 +        fprintf(stderr,"\nReading checkpoint file %s generated: %s\n\n",
 +                fn,ftime);
 +    }
 +      
 +      /* This will not be written if we do appending, since fplog is still NULL then */
 +    if (fplog)
 +    {
 +        fprintf(fplog,"\n");
 +        fprintf(fplog,"Reading checkpoint file %s\n",fn);
 +        fprintf(fplog,"  file generated by:     %s\n",fprog);  
 +        fprintf(fplog,"  file generated at:     %s\n",ftime);  
 +        fprintf(fplog,"  GROMACS build time:    %s\n",btime);  
 +        fprintf(fplog,"  GROMACS build user:    %s\n",buser);  
 +        fprintf(fplog,"  GROMACS build host:    %s\n",bhost);
 +        fprintf(fplog,"  GROMACS double prec.:  %d\n",double_prec);
 +        fprintf(fplog,"  simulation part #:     %d\n",*simulation_part);
 +        fprintf(fplog,"  step:                  %s\n",gmx_step_str(*step,buf));
 +        fprintf(fplog,"  time:                  %f\n",*t);  
 +        fprintf(fplog,"\n");
 +    }
 +    
 +    if (natoms != state->natoms)
 +    {
 +        gmx_fatal(FARGS,"Checkpoint file is for a system of %d atoms, while the current system consists of %d atoms",natoms,state->natoms);
 +    }
 +    if (ngtc != state->ngtc)
 +    {
 +        gmx_fatal(FARGS,"Checkpoint file is for a system of %d T-coupling groups, while the current system consists of %d T-coupling groups",ngtc,state->ngtc);
 +    }
 +    if (nnhpres != state->nnhpres)
 +    {
 +        gmx_fatal(FARGS,"Checkpoint file is for a system of %d NH-pressure-coupling variables, while the current system consists of %d NH-pressure-coupling variables",nnhpres,state->nnhpres);
 +    }
 +
 +    if (nlambda != state->dfhist.nlambda)
 +    {
 +        gmx_fatal(FARGS,"Checkpoint file is for a system with %d lambda states, while the current system consists of %d lambda states",nlambda,state->dfhist.nlambda);
 +    }
 +
 +    init_gtc_state(state,state->ngtc,state->nnhpres,nhchainlength); /* need to keep this here to keep the tpr format working */
 +    /* write over whatever was read; we use the number of Nose-Hoover chains from the checkpoint */
 +    
 +    if (eIntegrator_f != eIntegrator)
 +    {
 +        if (MASTER(cr))
 +        {
 +            fprintf(stderr,int_warn,EI(eIntegrator_f),EI(eIntegrator));
 +        }
 +              if(bAppendOutputFiles)
 +              {
 +                      gmx_fatal(FARGS,
 +                                        "Output file appending requested, but input/checkpoint integrators do not match.\n"
 +                                        "Stopping the run to prevent you from ruining all your data...\n"
 +                                        "If you _really_ know what you are doing, try with the -noappend option.\n");
 +              }
 +        if (fplog)
 +        {
 +            fprintf(fplog,int_warn,EI(eIntegrator_f),EI(eIntegrator));
 +        }
 +    }
 +
 +    if (!PAR(cr))
 +    {
 +        nppnodes = 1;
 +        cr->npmenodes = 0;
 +    }
 +    else if (bPartDecomp)
 +    {
 +        nppnodes = cr->nnodes;
 +        cr->npmenodes = 0;
 +    }
 +    else if (cr->nnodes == nppnodes_f + npmenodes_f)
 +    {
 +        if (cr->npmenodes < 0)
 +        {
 +            cr->npmenodes = npmenodes_f;
 +        }
 +        nppnodes = cr->nnodes - cr->npmenodes;
 +        if (nppnodes == nppnodes_f)
 +        {
 +            for(d=0; d<DIM; d++)
 +            {
 +                if (dd_nc[d] == 0)
 +                {
 +                    dd_nc[d] = dd_nc_f[d];
 +                }
 +            }
 +        }
 +    }
 +    else
 +    {
 +        /* The number of PP nodes has not been set yet */
 +        nppnodes = -1;
 +    }
 +
 +    if ((EI_SD(eIntegrator) || eIntegrator == eiBD) && nppnodes > 0)
 +    {
 +        /* Correct the RNG state size for the number of PP nodes.
 +         * Such assignments should all be moved to one central function.
 +         */
 +        state->nrng  = nppnodes*gmx_rng_n();
 +        state->nrngi = nppnodes;
 +    }
 +    
 +    *bReadRNG = TRUE;
 +    if (fflags != state->flags)
 +    {
 +              
 +        if (MASTER(cr))
 +        {
 +                      if(bAppendOutputFiles)
 +                      {
 +                              gmx_fatal(FARGS,
 +                                                "Output file appending requested, but input and checkpoint states are not identical.\n"
 +                                                "Stopping the run to prevent you from ruining all your data...\n"
 +                                                "You can try with the -noappend option, and get more info in the log file.\n");
 +                      }
 +                      
 +            if (getenv("GMX_ALLOW_CPT_MISMATCH") == NULL)
 +            {
 +                gmx_fatal(FARGS,"You seem to have switched ensemble, integrator, T and/or P-coupling algorithm between the cpt and tpr file. The recommended way of doing this is passing the cpt file to grompp (with option -t) instead of to mdrun. If you know what you are doing, you can override this error by setting the env.var. GMX_ALLOW_CPT_MISMATCH");
 +            }
 +            else
 +            {
 +                fprintf(stderr,
 +                        "WARNING: The checkpoint state entries do not match the simulation,\n"
 +                        "         see the log file for details\n\n");
 +            }
 +        }
 +              
 +              if(fplog)
 +              {
 +                      print_flag_mismatch(fplog,state->flags,fflags);
 +              }
 +    }
 +    else
 +    {
 +        if ((EI_SD(eIntegrator) || eIntegrator == eiBD) &&
 +            nppnodes != nppnodes_f)
 +        {
 +            *bReadRNG = FALSE;
 +            if (MASTER(cr))
 +            {
 +                fprintf(stderr,sd_note,nppnodes_f,nppnodes);
 +            }
 +            if (fplog)
 +            {
 +                fprintf(fplog ,sd_note,nppnodes_f,nppnodes);
 +            }
 +        }
 +        if (MASTER(cr))
 +        {
 +            check_match(fplog,version,btime,buser,bhost,double_prec,fprog,
 +                        cr,bPartDecomp,nppnodes_f,npmenodes_f,dd_nc,dd_nc_f);
 +        }
 +    }
 +    ret = do_cpt_state(gmx_fio_getxdr(fp),TRUE,fflags,state,*bReadRNG,NULL);
 +    *init_fep_state = state->fep_state;  /* there should be a better way to do this than setting it here.
 +                                            Investigate for 5.0. */
 +    if (ret)
 +    {
 +        cp_error();
 +    }
 +    ret = do_cpt_ekinstate(gmx_fio_getxdr(fp),TRUE,
 +                           flags_eks,&state->ekinstate,NULL);
 +    if (ret)
 +    {
 +        cp_error();
 +    }
 +    *bReadEkin = ((flags_eks & (1<<eeksEKINH)) || (flags_eks & (1<<eeksEKINF)) || (flags_eks & (1<<eeksEKINO)) ||
 +                  ((flags_eks & (1<<eeksEKINSCALEF)) | (flags_eks & (1<<eeksEKINSCALEH)) | (flags_eks & (1<<eeksVSCALE))));
 +    
 +    ret = do_cpt_enerhist(gmx_fio_getxdr(fp),TRUE,
 +                          flags_enh,&state->enerhist,NULL);
 +    if (ret)
 +    {
 +        cp_error();
 +    }
 +
++    ret = do_cpt_EDstate(gmx_fio_getxdr(fp),TRUE,&state->edsamstate,NULL);
++    if (ret)
++    {
++        cp_error();
++    }
++
 +    if (file_version < 6)
 +    {
 +        const char *warn="Reading checkpoint file in old format, assuming that the run that generated this file started at step 0, if this is not the case the averages stored in the energy file will be incorrect.";
 +
 +        fprintf(stderr,"\nWARNING: %s\n\n",warn);
 +        if (fplog)
 +        {
 +            fprintf(fplog,"\nWARNING: %s\n\n",warn);
 +        }
 +        state->enerhist.nsum     = *step;
 +        state->enerhist.nsum_sim = *step;
 +    }
 +
 +    ret = do_cpt_df_hist(gmx_fio_getxdr(fp),TRUE,
 +                         flags_dfh,&state->dfhist,NULL);
 +    if (ret)
 +    {
 +        cp_error();
 +    }
 +
 +      ret = do_cpt_files(gmx_fio_getxdr(fp),TRUE,&outputfiles,&nfiles,NULL,file_version);
 +      if (ret)
 +      {
 +              cp_error();
 +      }
 +                                         
 +    ret = do_cpt_footer(gmx_fio_getxdr(fp),TRUE,file_version);
 +    if (ret)
 +    {
 +        cp_error();
 +    }
 +    if( gmx_fio_close(fp) != 0)
 +      {
 +        gmx_file("Cannot read/write checkpoint; corrupt file, or maybe you are out of disk space?");
 +      }
 +    
 +    sfree(fprog);
 +    sfree(ftime);
 +    sfree(btime);
 +    sfree(buser);
 +    sfree(bhost);
 +      
 +      /* If the user wants to append to output files,
 +     * we use the file pointer positions of the output files stored
 +     * in the checkpoint file and truncate the files such that any frames
 +     * written after the checkpoint time are removed.
 +     * All files are md5sum checked such that we can be sure that
 +     * we do not truncate other (maybe imprortant) files.
 +       */
 +    if (bAppendOutputFiles)
 +    {
 +        if (fn2ftp(outputfiles[0].filename)!=efLOG)
 +        {
 +            /* make sure first file is log file so that it is OK to use it for 
 +             * locking
 +             */
 +            gmx_fatal(FARGS,"The first output file should always be the log "
 +                      "file but instead is: %s. Cannot do appending because of this condition.", outputfiles[0].filename);
 +        }
 +        for(i=0;i<nfiles;i++)
 +        {
 +            if (outputfiles[i].offset < 0)
 +            {
 +                gmx_fatal(FARGS,"The original run wrote a file called '%s' which "
 +                    "is larger than 2 GB, but mdrun did not support large file"
 +                    " offsets. Can not append. Run mdrun with -noappend",
 +                    outputfiles[i].filename);
 +            }
 +#ifdef GMX_FAHCORE
 +            chksum_file=gmx_fio_open(outputfiles[i].filename,"a");
 +
 +#else
 +            chksum_file=gmx_fio_open(outputfiles[i].filename,"r+");
 +
 +            /* lock log file */                
 +            if (i==0)
 +            {
 +                /* Note that there are systems where the lock operation
 +                 * will succeed, but a second process can also lock the file.
 +                 * We should probably try to detect this.
 +                 */
 +#ifndef GMX_NATIVE_WINDOWS
 +                if (fcntl(fileno(gmx_fio_getfp(chksum_file)), F_SETLK, &fl)
 +                    ==-1)
 +#else
 +                if (_locking(fileno(gmx_fio_getfp(chksum_file)), _LK_NBLCK, LONG_MAX)==-1)
 +#endif
 +                {
 +                    if (errno == ENOSYS)
 +                    {
 +                        if (!bForceAppend)
 +                        {
 +                            gmx_fatal(FARGS,"File locking is not supported on this system. Use -noappend or specify -append explicitly to append anyhow.");
 +                        }
 +                        else
 +                        {
 +                            fprintf(stderr,"\nNOTE: File locking is not supported on this system, will not lock %s\n\n",outputfiles[i].filename);
 +                            if (fplog)
 +                            {
 +                                fprintf(fplog,"\nNOTE: File locking not supported on this system, will not lock %s\n\n",outputfiles[i].filename);
 +                            }
 +                        }
 +                    }
 +                    else if (errno == EACCES || errno == EAGAIN)
 +                    {
 +                        gmx_fatal(FARGS,"Failed to lock: %s. Already running "
 +                                  "simulation?", outputfiles[i].filename);
 +                    }
 +                    else
 +                    {
 +                        gmx_fatal(FARGS,"Failed to lock: %s. %s.",
 +                                  outputfiles[i].filename, strerror(errno));
 +                    }
 +                }
 +            }
 +            
 +            /* compute md5 chksum */ 
 +            if (outputfiles[i].chksum_size != -1)
 +            {
 +                if (gmx_fio_get_file_md5(chksum_file,outputfiles[i].offset,
 +                                     digest) != outputfiles[i].chksum_size)  /*at the end of the call the file position is at the end of the file*/
 +                {
 +                    gmx_fatal(FARGS,"Can't read %d bytes of '%s' to compute checksum. The file has been replaced or its contents have been modified. Cannot do appending because of this condition.",
 +                              outputfiles[i].chksum_size, 
 +                              outputfiles[i].filename);
 +                }
 +            } 
 +            if (i==0)  /*log file needs to be seeked in case we need to truncate (other files are truncated below)*/
 +            {
 +                if (gmx_fio_seek(chksum_file,outputfiles[i].offset))
 +                {
 +                      gmx_fatal(FARGS,"Seek error! Failed to truncate log-file: %s.", strerror(errno));
 +                }
 +            }
 +#endif
 +            
 +            if (i==0) /*open log file here - so that lock is never lifted 
 +                        after chksum is calculated */
 +            {
 +                *pfplog = gmx_fio_getfp(chksum_file);
 +            }
 +            else
 +            {
 +                gmx_fio_close(chksum_file);
 +            }
 +#ifndef GMX_FAHCORE            
 +            /* compare md5 chksum */
 +            if (outputfiles[i].chksum_size != -1 &&
 +                memcmp(digest,outputfiles[i].chksum,16)!=0) 
 +            {
 +                if (debug)
 +                {
 +                    fprintf(debug,"chksum for %s: ",outputfiles[i].filename);
 +                    for (j=0; j<16; j++)
 +                    {
 +                        fprintf(debug,"%02x",digest[j]);
 +                    }
 +                    fprintf(debug,"\n");
 +                }
 +                gmx_fatal(FARGS,"Checksum wrong for '%s'. The file has been replaced or its contents have been modified. Cannot do appending because of this condition.",
 +                          outputfiles[i].filename);
 +            }
 +#endif        
 +
 +              
 +            if (i!=0) /*log file is already seeked to correct position */
 +            {
 +#ifdef GMX_NATIVE_WINDOWS
 +                rc = gmx_wintruncate(outputfiles[i].filename,outputfiles[i].offset);
 +#else            
 +                rc = truncate(outputfiles[i].filename,outputfiles[i].offset);
 +#endif
 +                if(rc!=0)
 +                {
 +                    gmx_fatal(FARGS,"Truncation of file %s failed. Cannot do appending because of this failure.",outputfiles[i].filename);
 +                }
 +            }
 +        }
 +    }
 +
 +    sfree(outputfiles);
 +}
 +
 +
 +void load_checkpoint(const char *fn,FILE **fplog,
 +                     t_commrec *cr,gmx_bool bPartDecomp,ivec dd_nc,
 +                     t_inputrec *ir,t_state *state,
 +                     gmx_bool *bReadRNG,gmx_bool *bReadEkin,
 +                     gmx_bool bAppend,gmx_bool bForceAppend)
 +{
 +    gmx_large_int_t step;
 +    double t;
 +
 +    if (SIMMASTER(cr)) {
 +      /* Read the state from the checkpoint file */
 +      read_checkpoint(fn,fplog,
 +                      cr,bPartDecomp,dd_nc,
 +                      ir->eI,&(ir->fepvals->init_fep_state),&step,&t,state,bReadRNG,bReadEkin,
 +                      &ir->simulation_part,bAppend,bForceAppend);
 +    }
 +    if (PAR(cr)) {
 +      gmx_bcast(sizeof(cr->npmenodes),&cr->npmenodes,cr);
 +      gmx_bcast(DIM*sizeof(dd_nc[0]),dd_nc,cr);
 +      gmx_bcast(sizeof(step),&step,cr);
 +      gmx_bcast(sizeof(*bReadRNG),bReadRNG,cr);
 +      gmx_bcast(sizeof(*bReadEkin),bReadEkin,cr);
 +    }
 +    ir->bContinuation    = TRUE;
 +    if (ir->nsteps >= 0)
 +    {
 +        ir->nsteps          += ir->init_step - step;
 +    }
 +    ir->init_step        = step;
 +      ir->simulation_part += 1;
 +}
 +
 +static void read_checkpoint_data(t_fileio *fp,int *simulation_part,
 +                                 gmx_large_int_t *step,double *t,t_state *state,
 +                                 gmx_bool bReadRNG,
 +                                 int *nfiles,gmx_file_position_t **outputfiles)
 +{
 +    int  file_version;
 +    char *version,*btime,*buser,*bhost,*fprog,*ftime;
 +    int  double_prec;
 +    int  eIntegrator;
 +    int  nppnodes,npme;
 +    ivec dd_nc;
 +    int  flags_eks,flags_enh,flags_dfh;
 +    int  nfiles_loc;
 +    gmx_file_position_t *files_loc=NULL;
 +    int  ret;
 +      
 +    do_cpt_header(gmx_fio_getxdr(fp),TRUE,&file_version,
 +                  &version,&btime,&buser,&bhost,&double_prec,&fprog,&ftime,
 +                  &eIntegrator,simulation_part,step,t,&nppnodes,dd_nc,&npme,
 +                  &state->natoms,&state->ngtc,&state->nnhpres,&state->nhchainlength,
-                   &flags_eks,&flags_enh,&flags_dfh,out);
++                  &(state->dfhist.nlambda),&state->flags,&flags_eks,&flags_enh,&flags_dfh,
++                  &state->edsamstate.nED,NULL);
 +    ret =
 +        do_cpt_state(gmx_fio_getxdr(fp),TRUE,state->flags,state,bReadRNG,NULL);
 +    if (ret)
 +    {
 +        cp_error();
 +    }
 +    ret = do_cpt_ekinstate(gmx_fio_getxdr(fp),TRUE,
 +                           flags_eks,&state->ekinstate,NULL);
 +    if (ret)
 +    {
 +        cp_error();
 +    }
 +    ret = do_cpt_enerhist(gmx_fio_getxdr(fp),TRUE,
 +                          flags_enh,&state->enerhist,NULL);
 +    if (ret)
 +    {
 +        cp_error();
 +    }
 +    ret = do_cpt_df_hist(gmx_fio_getxdr(fp),TRUE,
 +                          flags_dfh,&state->dfhist,NULL);
 +    if (ret)
 +    {
 +        cp_error();
 +    }
 +
++    ret = do_cpt_EDstate(gmx_fio_getxdr(fp),TRUE,&state->edsamstate,NULL);
++    if (ret)
++    {
++        cp_error();
++    }
++
 +    ret = do_cpt_files(gmx_fio_getxdr(fp),TRUE,
 +                       outputfiles != NULL ? outputfiles : &files_loc,
 +                       outputfiles != NULL ? nfiles : &nfiles_loc,
 +                       NULL,file_version);
 +    if (files_loc != NULL)
 +    {
 +        sfree(files_loc);
 +    }
 +      
 +    if (ret)
 +    {
 +        cp_error();
 +    }
 +      
 +    ret = do_cpt_footer(gmx_fio_getxdr(fp),TRUE,file_version);
 +    if (ret)
 +    {
 +        cp_error();
 +    }
 +
 +    sfree(fprog);
 +    sfree(ftime);
 +    sfree(btime);
 +    sfree(buser);
 +    sfree(bhost);
 +}
 +
 +void 
 +read_checkpoint_state(const char *fn,int *simulation_part,
 +                      gmx_large_int_t *step,double *t,t_state *state)
 +{
 +    t_fileio *fp;
 +    
 +    fp = gmx_fio_open(fn,"r");
 +    read_checkpoint_data(fp,simulation_part,step,t,state,FALSE,NULL,NULL);
 +    if( gmx_fio_close(fp) != 0)
 +      {
 +        gmx_file("Cannot read/write checkpoint; corrupt file, or maybe you are out of disk space?");
 +      }
 +}
 +
 +void read_checkpoint_trxframe(t_fileio *fp,t_trxframe *fr)
 +{
 +    t_state state;
 +    int simulation_part;
 +    gmx_large_int_t step;
 +    double t;
 +    
 +    init_state(&state,0,0,0,0,0);
 +    
 +    read_checkpoint_data(fp,&simulation_part,&step,&t,&state,FALSE,NULL,NULL);
 +    
 +    fr->natoms  = state.natoms;
 +    fr->bTitle  = FALSE;
 +    fr->bStep   = TRUE;
 +    fr->step    = gmx_large_int_to_int(step,
 +                                    "conversion of checkpoint to trajectory");
 +    fr->bTime   = TRUE;
 +    fr->time    = t;
 +    fr->bLambda = TRUE;
 +    fr->lambda  = state.lambda[efptFEP];
 +    fr->fep_state  = state.fep_state;
 +    fr->bAtoms  = FALSE;
 +    fr->bX      = (state.flags & (1<<estX));
 +    if (fr->bX)
 +    {
 +        fr->x     = state.x;
 +        state.x   = NULL;
 +    }
 +    fr->bV      = (state.flags & (1<<estV));
 +    if (fr->bV)
 +    {
 +        fr->v     = state.v;
 +        state.v   = NULL;
 +    }
 +    fr->bF      = FALSE;
 +    fr->bBox    = (state.flags & (1<<estBOX));
 +    if (fr->bBox)
 +    {
 +        copy_mat(state.box,fr->box);
 +    }
 +    done_state(&state);
 +}
 +
 +void list_checkpoint(const char *fn,FILE *out)
 +{
 +    t_fileio *fp;
 +    int  file_version;
 +    char *version,*btime,*buser,*bhost,*fprog,*ftime;
 +    int  double_prec;
 +    int  eIntegrator,simulation_part,nppnodes,npme;
 +    gmx_large_int_t step;
 +    double t;
 +    ivec dd_nc;
 +    t_state state;
 +    int  flags_eks,flags_enh,flags_dfh;
 +    int  indent;
 +    int  i,j;
 +    int  ret;
 +    gmx_file_position_t *outputfiles;
 +      int  nfiles;
 +      
 +    init_state(&state,-1,-1,-1,-1,0);
 +
 +    fp = gmx_fio_open(fn,"r");
 +    do_cpt_header(gmx_fio_getxdr(fp),TRUE,&file_version,
 +                  &version,&btime,&buser,&bhost,&double_prec,&fprog,&ftime,
 +                  &eIntegrator,&simulation_part,&step,&t,&nppnodes,dd_nc,&npme,
 +                  &state.natoms,&state.ngtc,&state.nnhpres,&state.nhchainlength,
 +                  &(state.dfhist.nlambda),&state.flags,
++                  &flags_eks,&flags_enh,&flags_dfh,&state.edsamstate.nED,out);
 +    ret = do_cpt_state(gmx_fio_getxdr(fp),TRUE,state.flags,&state,TRUE,out);
 +    if (ret)
 +    {
 +        cp_error();
 +    }
 +    ret = do_cpt_ekinstate(gmx_fio_getxdr(fp),TRUE,
 +                           flags_eks,&state.ekinstate,out);
 +    if (ret)
 +    {
 +        cp_error();
 +    }
 +    ret = do_cpt_enerhist(gmx_fio_getxdr(fp),TRUE,
 +                          flags_enh,&state.enerhist,out);
 +
 +    if (ret == 0)
 +    {
 +        init_df_history(&state.dfhist,state.dfhist.nlambda,0); /* reinitialize state with correct sizes */
 +        ret = do_cpt_df_hist(gmx_fio_getxdr(fp),TRUE,
 +                             flags_dfh,&state.dfhist,out);
 +    }
++
++    if (ret == 0)
++    {
++        ret = do_cpt_EDstate(gmx_fio_getxdr(fp),TRUE,&state.edsamstate,out);
++    }
++
 +    if (ret == 0)
 +    {
 +              do_cpt_files(gmx_fio_getxdr(fp),TRUE,&outputfiles,&nfiles,out,file_version);
 +      }
 +      
 +    if (ret == 0)
 +    {
 +        ret = do_cpt_footer(gmx_fio_getxdr(fp),TRUE,file_version);
 +    }
 +      
 +    if (ret)
 +    {
 +        cp_warning(out);
 +    }
 +    if( gmx_fio_close(fp) != 0)
 +      {
 +        gmx_file("Cannot read/write checkpoint; corrupt file, or maybe you are out of disk space?");
 +      }
 +    
 +    done_state(&state);
 +}
 +
 +
 +static gmx_bool exist_output_file(const char *fnm_cp,int nfile,const t_filenm fnm[])
 +{
 +    int i;
 +
 +    /* Check if the output file name stored in the checkpoint file
 +     * is one of the output file names of mdrun.
 +     */
 +    i = 0;
 +    while (i < nfile &&
 +           !(is_output(&fnm[i]) && strcmp(fnm_cp,fnm[i].fns[0]) == 0))
 +    {
 +        i++;
 +    }
 +    
 +    return (i < nfile && gmx_fexist(fnm_cp));
 +}
 +
 +/* This routine cannot print tons of data, since it is called before the log file is opened. */
 +gmx_bool read_checkpoint_simulation_part(const char *filename, int *simulation_part,
 +                                     gmx_large_int_t *cpt_step,t_commrec *cr,
 +                                     gmx_bool bAppendReq,
 +                                     int nfile,const t_filenm fnm[],
 +                                     const char *part_suffix,gmx_bool *bAddPart)
 +{
 +    t_fileio *fp;
 +    gmx_large_int_t step=0;
 +      double t;
 +    t_state state;
 +    int  nfiles;
 +    gmx_file_position_t *outputfiles;
 +    int  nexist,f;
 +    gmx_bool bAppend;
 +    char *fn,suf_up[STRLEN];
 +
 +    bAppend = FALSE;
 +
 +    if (SIMMASTER(cr)) {
 +        if(!gmx_fexist(filename) || (!(fp = gmx_fio_open(filename,"r")) ))
 +        {
 +            *simulation_part = 0;
 +        }
 +        else 
 +        {
 +            init_state(&state,0,0,0,0,0);
 +
 +            read_checkpoint_data(fp,simulation_part,&step,&t,&state,FALSE,
 +                                 &nfiles,&outputfiles);
 +            if( gmx_fio_close(fp) != 0)
 +            {
 +                gmx_file("Cannot read/write checkpoint; corrupt file, or maybe you are out of disk space?");
 +            }
 +            done_state(&state);
 +
 +            if (bAppendReq)
 +            {
 +                nexist = 0;
 +                for(f=0; f<nfiles; f++)
 +                {
 +                    if (exist_output_file(outputfiles[f].filename,nfile,fnm))
 +                    {
 +                        nexist++;
 +                    }
 +                }
 +                if (nexist == nfiles)
 +                {
 +                    bAppend = bAppendReq;
 +                }
 +                else if (nexist > 0)
 +                {
 +                    fprintf(stderr,
 +                            "Output file appending has been requested,\n"
 +                            "but some output files listed in the checkpoint file %s\n"
 +                            "are not present or are named differently by the current program:\n",
 +                            filename);
 +                    fprintf(stderr,"output files present:");
 +                    for(f=0; f<nfiles; f++)
 +                    {
 +                        if (exist_output_file(outputfiles[f].filename,
 +                                              nfile,fnm))
 +                        {
 +                            fprintf(stderr," %s",outputfiles[f].filename);
 +                        }
 +                    }
 +                    fprintf(stderr,"\n");
 +                    fprintf(stderr,"output files not present or named differently:");
 +                    for(f=0; f<nfiles; f++)
 +                    {
 +                        if (!exist_output_file(outputfiles[f].filename,
 +                                               nfile,fnm))
 +                        {
 +                            fprintf(stderr," %s",outputfiles[f].filename);
 +                        }
 +                    }
 +                    fprintf(stderr,"\n");
 +                    
 +                    gmx_fatal(FARGS,"File appending requested, but only %d of the %d output files are present",nexist,nfiles);
 +                }
 +            }
 +            
 +            if (bAppend)
 +            {
 +                if (nfiles == 0)
 +                {
 +                    gmx_fatal(FARGS,"File appending requested, but no output file information is stored in the checkpoint file");
 +                }
 +                fn = outputfiles[0].filename;
 +                if (strlen(fn) < 4 ||
 +                    gmx_strcasecmp(fn+strlen(fn)-4,ftp2ext(efLOG)) == 0)
 +                {
 +                    gmx_fatal(FARGS,"File appending requested, but the log file is not the first file listed in the checkpoint file");
 +                }
 +                /* Set bAddPart to whether the suffix string '.part' is present
 +                 * in the log file name.
 +                 */
 +                strcpy(suf_up,part_suffix);
 +                upstring(suf_up);
 +                *bAddPart = (strstr(fn,part_suffix) != NULL ||
 +                             strstr(fn,suf_up) != NULL);
 +            }
 +
 +            sfree(outputfiles);
 +        }
 +    }
 +    if (PAR(cr))
 +    {
 +        gmx_bcast(sizeof(*simulation_part),simulation_part,cr);
 +
 +        if (*simulation_part > 0 && bAppendReq)
 +        {
 +            gmx_bcast(sizeof(bAppend),&bAppend,cr);
 +            gmx_bcast(sizeof(*bAddPart),bAddPart,cr);
 +        }
 +    }
 +    if (NULL != cpt_step)
 +    {
 +        *cpt_step = step;
 +    }
 +
 +    return bAppend;
 +}
index 20b96fe8ac18b17671c89470691fde189562d0e7,0000000000000000000000000000000000000000..734a11164a233335b5c527ed8e457e0fa673a3bc
mode 100644,000000..100644
--- /dev/null
@@@ -1,759 -1,0 +1,748 @@@
- #ifdef GMX_GPU
- #include <cuda.h>
- #include <cuda_runtime_api.h>
- #endif
 +/*
 + * 
 + *                This source code is part of
 + * 
 + *                 G   R   O   M   A   C   S
 + * 
 + *          GROningen MAchine for Chemical Simulations
 + * 
 + *                        VERSION 3.2.0
 + * Written by David van der Spoel, Erik Lindahl, Berk Hess, and others.
 + * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 + * Copyright (c) 2001-2004, The GROMACS development team,
 + * check out http://www.gromacs.org for more information.
 +
 + * This program is free software; you can redistribute it and/or
 + * modify it under the terms of the GNU General Public License
 + * as published by the Free Software Foundation; either version 2
 + * of the License, or (at your option) any later version.
 + * 
 + * If you want to redistribute modifications, please consider that
 + * scientific software is very special. Version control is crucial -
 + * bugs must be traceable. We will be happy to consider code for
 + * inclusion in the official distribution, but derived work must not
 + * be called official GROMACS. Details are found in the README & COPYING
 + * files - if they are missing, get the official version at www.gromacs.org.
 + * 
 + * To help us fund GROMACS development, we humbly ask that you cite
 + * the papers on the package - you can find them in the top README file.
 + * 
 + * For more info, check our website at http://www.gromacs.org
 + * 
 + * And Hey:
 + * GROningen Mixture of Alchemy and Childrens' Stories
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#ifdef GMX_THREAD_MPI
 +#include <thread_mpi.h>
 +#endif
 +
 +#ifdef HAVE_LIBMKL
 +#include <mkl.h>
 +#endif
- #ifdef GMX_FAHCORE
 +#ifdef GMX_FFT_FFTW3
 +#include <fftw3.h>
 +#endif
 +
 +/* This file is completely threadsafe - keep it that way! */
 +
 +#include <string.h>
 +#include <ctype.h>
 +#include "sysstuff.h"
 +#include "smalloc.h"
 +#include "string2.h"
 +#include "macros.h"
 +#include <time.h>
 +#include "random.h"
 +#include "statutil.h"
 +#include "copyrite.h"
 +#include "strdb.h"
 +#include "futil.h"
 +#include "vec.h"
 +#include "buildinfo.h"
 +#include "gmx_cpuid.h"
 +
 +static void pr_two(FILE *out,int c,int i)
 +{
 +  if (i < 10)
 +    fprintf(out,"%c0%1d",c,i);
 +  else
 +    fprintf(out,"%c%2d",c,i);
 +}
 +
 +void pr_difftime(FILE *out,double dt)
 +{
 +  int    ndays,nhours,nmins,nsecs;
 +  gmx_bool   bPrint,bPrinted;
 +
 +  ndays = dt/(24*3600);
 +  dt    = dt-24*3600*ndays;
 +  nhours= dt/3600;
 +  dt    = dt-3600*nhours;
 +  nmins = dt/60;
 +  dt    = dt-nmins*60;
 +  nsecs = dt;
 +  bPrint= (ndays > 0);
 +  bPrinted=bPrint;
 +  if (bPrint) 
 +    fprintf(out,"%d",ndays);
 +  bPrint=bPrint || (nhours > 0);
 +  if (bPrint) {
 +    if (bPrinted)
 +      pr_two(out,'d',nhours);
 +    else 
 +      fprintf(out,"%d",nhours);
 +  }
 +  bPrinted=bPrinted || bPrint;
 +  bPrint=bPrint || (nmins > 0);
 +  if (bPrint) {
 +    if (bPrinted)
 +      pr_two(out,'h',nmins);
 +    else 
 +      fprintf(out,"%d",nmins);
 +  }
 +  bPrinted=bPrinted || bPrint;
 +  if (bPrinted)
 +    pr_two(out,':',nsecs);
 +  else
 +    fprintf(out,"%ds",nsecs);
 +  fprintf(out,"\n");
 +}
 +
 +
 +gmx_bool be_cool(void)
 +{
 +  /* Yes, it is bad to check the environment variable every call,
 +   * but we dont call this routine often, and it avoids using 
 +   * a mutex for locking the variable...
 +   */
- #else
-   return (getenv("GMX_NO_QUOTES") == NULL);
++#ifdef GMX_COOL_QUOTES
++  return (getenv("GMX_NO_QUOTES") == NULL);
++#else
 +  /*be uncool*/
 +  return FALSE;
- #ifdef GMX_GPU
-     int cuda_driver,cuda_runtime;
- #endif
 +#endif
 +}
 +
 +void space(FILE *out, int n)
 +{
 +  fprintf(out,"%*s",n,"");
 +}
 +
 +void f(char *a)
 +{
 +    int i;
 +    int len=strlen(a);
 +    
 +    for(i=0;i<len;i++)
 +        a[i]=~a[i]; 
 +}
 +
 +static void sp_print(FILE *out,const char *s)
 +{
 +  int slen;
 +  
 +  slen=strlen(s);
 +  space(out,(80-slen)/2);
 +  fprintf(out,"%s\n",s);
 +}
 +
 +static void ster_print(FILE *out,const char *s)
 +{
 +  int  slen;
 +  char buf[128];
 +  
 +  snprintf(buf,128,":-)  %s  (-:",s);
 +  slen=strlen(buf);
 +  space(out,(80-slen)/2);
 +  fprintf(out,"%s\n",buf);
 +}
 +
 +
 +static void pukeit(const char *db,const char *defstring, char *retstring, 
 +                 int retsize, int *cqnum)
 +{
 +  FILE *fp;
 +  char **help;
 +  int  i,nhlp;
 +  int  seed;
 + 
 +  if (be_cool() && ((fp = low_libopen(db,FALSE)) != NULL)) {
 +    nhlp=fget_lines(fp,&help);
 +    /* for libraries we can use the low-level close routines */
 +    ffclose(fp);
 +    seed=time(NULL);
 +    *cqnum=nhlp*rando(&seed);
 +    if (strlen(help[*cqnum]) >= STRLEN)
 +      help[*cqnum][STRLEN-1] = '\0';
 +    strncpy(retstring,help[*cqnum],retsize);
 +    f(retstring);
 +    for(i=0; (i<nhlp); i++)
 +      sfree(help[i]);
 +    sfree(help);
 +  }
 +  else 
 +    strncpy(retstring,defstring,retsize);
 +}
 +
 +void bromacs(char *retstring, int retsize)
 +{
 +  int dum;
 +
 +  pukeit("bromacs.dat",
 +       "Groningen Machine for Chemical Simulation",
 +       retstring,retsize,&dum);
 +}
 +
 +void cool_quote(char *retstring, int retsize, int *cqnum)
 +{
 +  char *tmpstr;
 +  char *s,*ptr;
 +  int tmpcq,*p;
 +  
 +  if (cqnum!=NULL)
 +    p = cqnum;
 +  else
 +    p = &tmpcq;
 +  
 +  /* protect audience from explicit lyrics */
 +  snew(tmpstr,retsize+1);
 +  pukeit("gurgle.dat","Thanx for Using GROMACS - Have a Nice Day",
 +       tmpstr,retsize-2,p);
 +
 +  if ((ptr = strchr(tmpstr,'_')) != NULL) {
 +    *ptr='\0';
 +    ptr++;
 +    sprintf(retstring,"\"%s\" %s",tmpstr,ptr);
 +  }
 +  else {
 +    strcpy(retstring,tmpstr);
 +  }
 +  sfree(tmpstr);
 +}
 +
 +void CopyRight(FILE *out,const char *szProgram)
 +{
 +  static const char * CopyrightText[] = {
 +             "Written by Emile Apol, Rossen Apostolov, Herman J.C. Berendsen,",
 +             "Aldert van Buuren, Pär Bjelkmar, Rudi van Drunen, Anton Feenstra, ",
 +             "Gerrit Groenhof, Peter Kasson, Per Larsson, Pieter Meulenhoff, ",
 +             "Teemu Murtola, Szilard Pall, Sander Pronk, Roland Schulz, ",
 +             "Michael Shirts, Alfons Sijbers, Peter Tieleman,\n",
 +             "Berk Hess, David van der Spoel, and Erik Lindahl.\n",
 +             "Copyright (c) 1991-2000, University of Groningen, The Netherlands.",
 +             "Copyright (c) 2001-2010, The GROMACS development team at",
 +             "Uppsala University & The Royal Institute of Technology, Sweden.",
 +             "check out http://www.gromacs.org for more information.\n"
 +  };
 +
 +  static const char * LicenseText[] = {
 +              "This program is free software; you can redistribute it and/or",
 +              "modify it under the terms of the GNU Lesser General Public License",
 +              "as published by the Free Software Foundation; either version 2.1",
 +              "of the License, or (at your option) any later version."
 +  };
 +
 +  /* Dont change szProgram arbitrarily - it must be argv[0], i.e. the 
 +   * name of a file. Otherwise, we won't be able to find the library dir.
 +   */
 +#define NCR (int)asize(CopyrightText)
 +/* TODO: Is this exception still needed? */
 +#ifdef GMX_FAHCORE
 +#define NLICENSE 0 /*FAH has an exception permission from GPL to allow digital signatures in Gromacs*/
 +#else
 +#define NLICENSE (int)asize(LicenseText)
 +#endif
 +
 +  char buf[256],tmpstr[1024];
 +  int i;
 +
 +#ifdef GMX_FAHCORE
 +  set_program_name("Gromacs");
 +#else
 +  set_program_name(szProgram);
 +#endif
 +
 +  ster_print(out,"G  R  O  M  A  C  S");
 +  fprintf(out,"\n");
 +  
 +  bromacs(tmpstr,1023);
 +  sp_print(out,tmpstr); 
 +  fprintf(out,"\n");
 +
 +  ster_print(out,GromacsVersion());
 +  fprintf(out,"\n");
 +
 +  /* fprintf(out,"\n");*/
 +
 +  /* sp_print(out,"PLEASE NOTE: THIS IS A BETA VERSION\n");
 +  
 +  fprintf(out,"\n"); */
 +
 +  for(i=0; (i<NCR); i++) 
 +    sp_print(out,CopyrightText[i]);
 +  for(i=0; (i<NLICENSE); i++)
 +    sp_print(out,LicenseText[i]);
 +
 +  fprintf(out,"\n");
 +
 +  snprintf(buf,256,"%s",Program());
 +#ifdef GMX_DOUBLE
 +  strcat(buf," (double precision)");
 +#endif
 +  ster_print(out,buf);
 +  fprintf(out,"\n");
 +}
 +
 +
 +void thanx(FILE *fp)
 +{
 +  char cq[1024];
 +  int  cqnum;
 +
 +  /* protect the audience from suggestive discussions */
 +  cool_quote(cq,1023,&cqnum);
 +  
 +  if (be_cool()) 
 +    fprintf(fp,"\ngcq#%d: %s\n\n",cqnum,cq);
 +  else
 +    fprintf(fp,"\n%s\n\n",cq);
 +}
 +
 +typedef struct {
 +  const char *key;
 +  const char *author;
 +  const char *title;
 +  const char *journal;
 +  int volume,year;
 +  const char *pages;
 +} t_citerec;
 +
 +void please_cite(FILE *fp,const char *key)
 +{
 +  static const t_citerec citedb[] = {
 +    { "Allen1987a",
 +      "M. P. Allen and D. J. Tildesley",
 +      "Computer simulation of liquids",
 +      "Oxford Science Publications",
 +      1, 1987, "1" },
 +    { "Berendsen95a",
 +      "H. J. C. Berendsen, D. van der Spoel and R. van Drunen",
 +      "GROMACS: A message-passing parallel molecular dynamics implementation",
 +      "Comp. Phys. Comm.",
 +      91, 1995, "43-56" },
 +    { "Berendsen84a",
 +      "H. J. C. Berendsen, J. P. M. Postma, A. DiNola and J. R. Haak",
 +      "Molecular dynamics with coupling to an external bath",
 +      "J. Chem. Phys.",
 +      81, 1984, "3684-3690" },
 +    { "Ryckaert77a",
 +      "J. P. Ryckaert and G. Ciccotti and H. J. C. Berendsen",
 +      "Numerical Integration of the Cartesian Equations of Motion of a System with Constraints; Molecular Dynamics of n-Alkanes",
 +      "J. Comp. Phys.",
 +      23, 1977, "327-341" },
 +    { "Miyamoto92a",
 +      "S. Miyamoto and P. A. Kollman",
 +      "SETTLE: An Analytical Version of the SHAKE and RATTLE Algorithms for Rigid Water Models",
 +      "J. Comp. Chem.",
 +      13, 1992, "952-962" },
 +    { "Cromer1968a",
 +      "D. T. Cromer & J. B. Mann",
 +      "X-ray scattering factors computed from numerical Hartree-Fock wave functions",
 +      "Acta Cryst. A",
 +      24, 1968, "321" },
 +    { "Barth95a",
 +      "E. Barth and K. Kuczera and B. Leimkuhler and R. D. Skeel",
 +      "Algorithms for Constrained Molecular Dynamics",
 +      "J. Comp. Chem.",
 +      16, 1995, "1192-1209" },
 +    { "Essmann95a",
 +      "U. Essmann, L. Perera, M. L. Berkowitz, T. Darden, H. Lee and L. G. Pedersen ",
 +      "A smooth particle mesh Ewald method",
 +      "J. Chem. Phys.",
 +      103, 1995, "8577-8592" },
 +    { "Torda89a",
 +      "A. E. Torda and R. M. Scheek and W. F. van Gunsteren",
 +      "Time-dependent distance restraints in molecular dynamics simulations",
 +      "Chem. Phys. Lett.",
 +      157, 1989, "289-294" },
 +    { "Tironi95a",
 +      "I. G. Tironi and R. Sperb and P. E. Smith and W. F. van Gunsteren",
 +      "Generalized reaction field method for molecular dynamics simulations",
 +      "J. Chem. Phys",
 +      102, 1995, "5451-5459" },
 +    { "Hess97a",
 +      "B. Hess and H. Bekker and H. J. C. Berendsen and J. G. E. M. Fraaije",
 +      "LINCS: A Linear Constraint Solver for molecular simulations",
 +      "J. Comp. Chem.",
 +      18, 1997, "1463-1472" },
 +    { "Hess2008a",
 +      "B. Hess",
 +      "P-LINCS: A Parallel Linear Constraint Solver for molecular simulation",
 +      "J. Chem. Theory Comput.",
 +      4, 2008, "116-122" },
 +    { "Hess2008b",
 +      "B. Hess and C. Kutzner and D. van der Spoel and E. Lindahl",
 +      "GROMACS 4: Algorithms for highly efficient, load-balanced, and scalable molecular simulation",
 +      "J. Chem. Theory Comput.",
 +      4, 2008, "435-447" },
 +    { "Hub2010",
 +      "J. S. Hub, B. L. de Groot and D. van der Spoel",
 +      "g_wham - A free weighted histogram analysis implementation including robust error and autocorrelation estimates",
 +      "J. Chem. Theory Comput.",
 +      6, 2010, "3713-3720"}, 
 +    { "In-Chul99a",
 +      "Y. In-Chul and M. L. Berkowitz",
 +      "Ewald summation for systems with slab geometry",
 +      "J. Chem. Phys.",
 +      111, 1999, "3155-3162" },
 +    { "DeGroot97a",
 +      "B. L. de Groot and D. M. F. van Aalten and R. M. Scheek and A. Amadei and G. Vriend and H. J. C. Berendsen",
 +      "Prediction of Protein Conformational Freedom From Distance Constrains",
 +      "Proteins",
 +      29, 1997, "240-251" },
 +    { "Spoel98a",
 +      "D. van der Spoel and P. J. van Maaren and H. J. C. Berendsen",
 +      "A systematic study of water models for molecular simulation. Derivation of models optimized for use with a reaction-field.",
 +      "J. Chem. Phys.",
 +      108, 1998, "10220-10230" },
 +    { "Wishart98a",
 +      "D. S. Wishart and A. M. Nip",
 +      "Protein Chemical Shift Analysis: A Practical Guide",
 +      "Biochem. Cell Biol.",
 +      76, 1998, "153-163" },
 +    { "Maiorov95",
 +      "V. N. Maiorov and G. M. Crippen",
 +      "Size-Independent Comparison of Protein Three-Dimensional Structures",
 +      "PROTEINS: Struct. Funct. Gen.",
 +      22, 1995, "273-283" },
 +    { "Feenstra99",
 +      "K. A. Feenstra and B. Hess and H. J. C. Berendsen",
 +      "Improving Efficiency of Large Time-scale Molecular Dynamics Simulations of Hydrogen-rich Systems",
 +      "J. Comput. Chem.",
 +      20, 1999, "786-798" },
 +    { "Timneanu2004a",
 +      "N. Timneanu and C. Caleman and J. Hajdu and D. van der Spoel",
 +      "Auger Electron Cascades in Water and Ice",
 +      "Chem. Phys.",
 +      299, 2004, "277-283" },
 +    { "Pascal2011a",
 +      "T. A. Pascal and S. T. Lin and W. A. Goddard III",
 +      "Thermodynamics of liquids: standard molar entropies and heat capacities of common solvents from 2PT molecular dynamics",
 +      "Phys. Chem. Chem. Phys.",
 +      13, 2011, "169-181" },
 +    { "Caleman2011b",
 +      "C. Caleman and P. J. van Maaren and M. Hong and J. S. Hub and L. T. da Costa and D. van der Spoel",
 +      "Force Field Benchmark of Organic Liquids: Density, Enthalpy of Vaporization, Heat Capacities, Surface Tension, Isothermal Compressibility, Volumetric Expansion Coefficient, and Dielectric Constant",
 +      "J. Chem. Theo. Comp.",
 +      8, 2012, "61" },
 +    { "Lindahl2001a",
 +      "E. Lindahl and B. Hess and D. van der Spoel",
 +      "GROMACS 3.0: A package for molecular simulation and trajectory analysis",
 +      "J. Mol. Mod.",
 +      7, 2001, "306-317" },
 +    { "Wang2001a",
 +      "J. Wang and W. Wang and S. Huo and M. Lee and P. A. Kollman",
 +      "Solvation model based on weighted solvent accessible surface area",
 +      "J. Phys. Chem. B",
 +      105, 2001, "5055-5067" },
 +    { "Eisenberg86a",
 +      "D. Eisenberg and A. D. McLachlan",
 +      "Solvation energy in protein folding and binding",
 +      "Nature",
 +      319, 1986, "199-203" },
 +    { "Eisenhaber95",
 +      "Frank Eisenhaber and Philip Lijnzaad and Patrick Argos and Chris Sander and Michael Scharf",
 +      "The Double Cube Lattice Method: Efficient Approaches to Numerical Integration of Surface Area and Volume and to Dot Surface Contouring of Molecular Assemblies",
 +      "J. Comp. Chem.",
 +      16, 1995, "273-284" },
 +    { "Hess2002",
 +      "B. Hess, H. Saint-Martin and H.J.C. Berendsen",
 +      "Flexible constraints: an adiabatic treatment of quantum degrees of freedom, with application to the flexible and polarizable MCDHO model for water",
 +      "J. Chem. Phys.",
 +      116, 2002, "9602-9610" },
 +    { "Hetenyi2002b",
 +      "Csaba Hetenyi and David van der Spoel",
 +      "Efficient docking of peptides to proteins without prior knowledge of the binding site.",
 +      "Prot. Sci.",
 +      11, 2002, "1729-1737" },
 +    { "Hess2003",
 +      "B. Hess and R.M. Scheek",
 +      "Orientation restraints in molecular dynamics simulations using time and ensemble averaging",
 +      "J. Magn. Res.",
 +      164, 2003, "19-27" },
 +    { "Rappe1991a",
 +      "A. K. Rappe and W. A. Goddard III",
 +      "Charge Equillibration for Molecular Dynamics Simulations",
 +      "J. Phys. Chem.",
 +      95, 1991, "3358-3363" },
 +    { "Mu2005a",
 +      "Y. Mu, P. H. Nguyen and G. Stock",
 +      "Energy landscape of a small peptide revelaed by dihedral angle principal component analysis",
 +      "Prot. Struct. Funct. Bioinf.",
 +      58, 2005, "45-52" },
 +    { "Okabe2001a",
 +      "T. Okabe and M. Kawata and Y. Okamoto and M. Mikami",
 +      "Replica-exchange {M}onte {C}arlo method for the isobaric-isothermal ensemble",
 +      "Chem. Phys. Lett.",
 +      335, 2001, "435-439" },
 +    { "Hukushima96a",
 +      "K. Hukushima and K. Nemoto",
 +      "Exchange Monte Carlo Method and Application to Spin Glass Simulations",
 +      "J. Phys. Soc. Jpn.",
 +      65, 1996, "1604-1608" },
 +    { "Tropp80a",
 +      "J. Tropp",
 +      "Dipolar Relaxation and Nuclear Overhauser effects in nonrigid molecules: The effect of fluctuating internuclear distances",
 +      "J. Chem. Phys.",
 +      72, 1980, "6035-6043" },
 +    { "Bultinck2002a",
 +       "P. Bultinck and W. Langenaeker and P. Lahorte and F. De Proft and P. Geerlings and M. Waroquier and J. P. Tollenaere",
 +      "The electronegativity equalization method I: Parametrization and validation for atomic charge calculations",
 +      "J. Phys. Chem. A",
 +      106, 2002, "7887-7894" },
 +    { "Yang2006b",
 +      "Q. Y. Yang and K. A. Sharp",
 +      "Atomic charge parameters for the finite difference Poisson-Boltzmann method using electronegativity neutralization",
 +      "J. Chem. Theory Comput.",
 +      2, 2006, "1152-1167" },
 +    { "Spoel2005a",
 +      "D. van der Spoel, E. Lindahl, B. Hess, G. Groenhof, A. E. Mark and H. J. C. Berendsen",
 +      "GROMACS: Fast, Flexible and Free",
 +      "J. Comp. Chem.",
 +      26, 2005, "1701-1719" },
 +    { "Spoel2006b",
 +      "D. van der Spoel, P. J. van Maaren, P. Larsson and N. Timneanu",
 +      "Thermodynamics of hydrogen bonding in hydrophilic and hydrophobic media",
 +      "J. Phys. Chem. B",
 +      110, 2006, "4393-4398" },
 +    { "Spoel2006d",
 +      "D. van der Spoel and M. M. Seibert",
 +      "Protein folding kinetics and thermodynamics from atomistic simulations",
 +      "Phys. Rev. Letters",
 +      96, 2006, "238102" },
 +    { "Palmer94a",
 +      "B. J. Palmer",
 +      "Transverse-current autocorrelation-function calculations of the shear viscosity for molecular liquids",
 +      "Phys. Rev. E",
 +      49, 1994, "359-366" },
 +    { "Bussi2007a",
 +      "G. Bussi, D. Donadio and M. Parrinello",
 +      "Canonical sampling through velocity rescaling",
 +      "J. Chem. Phys.",
 +      126, 2007, "014101" },
 +    { "Hub2006",
 +      "J. S. Hub and B. L. de Groot",
 +      "Does CO2 permeate through Aquaporin-1?",
 +      "Biophys. J.",
 +      91, 2006, "842-848" },
 +    { "Hub2008",
 +      "J. S. Hub and B. L. de Groot",
 +      "Mechanism of selectivity in aquaporins and aquaglyceroporins",
 +      "PNAS",
 +      105, 2008, "1198-1203" },
 +    { "Friedrich2009",
 +      "M. S. Friedrichs, P. Eastman, V. Vaidyanathan, M. Houston, S. LeGrand, A. L. Beberg, D. L. Ensign, C. M. Bruns, and V. S. Pande",
 +      "Accelerating Molecular Dynamic Simulation on Graphics Processing Units",
 +      "J. Comp. Chem.",
 +      30, 2009, "864-872" },
 +    { "Engin2010",
 +      "O. Engin, A. Villa, M. Sayar and B. Hess",
 +      "Driving Forces for Adsorption of Amphiphilic Peptides to Air-Water Interface",
 +      "J. Phys. Chem. B",
 +      114, 2010, "11093" },
 +    { "Fritsch12",
 +      "S. Fritsch, C. Junghans and K. Kremer",
 +      "Adaptive molecular simulation study on structure formation of toluene around C60 using Gromacs",
 +      "J. Chem. Theo. Comp.",
 +      8, 2012, "398" },
 +    { "Junghans10",
 +      "C. Junghans and S. Poblete",
 +      "A reference implementation of the adaptive resolution scheme in ESPResSo",
 +      "Comp. Phys. Comm.",
 +      181, 2010, "1449" },
 +    { "Wang2010",
 +      "H. Wang, F. Dommert, C.Holm",
 +      "Optimizing working parameters of the smooth particle mesh Ewald algorithm in terms of accuracy and efficiency",
 +      "J. Chem. Phys. B",
 +      133, 2010, "034117" },
 +    { "Sugita1999a",
 +      "Y. Sugita, Y. Okamoto",
 +      "Replica-exchange molecular dynamics method for protein folding",
 +      "Chem. Phys. Lett.",
 +      314, 1999, "141-151" },
 +    { "Kutzner2011",
 +      "C. Kutzner and J. Czub and H. Grubmuller",
 +      "Keep it Flexible: Driving Macromolecular Rotary Motions in Atomistic Simulations with GROMACS",
 +      "J. Chem. Theory Comput.",
 +      7, 2011, "1381-1393" },
 +    { "Hoefling2011",
 +      "M. Hoefling, N. Lima, D. Haenni, C.A.M. Seidel, B. Schuler, H. Grubmuller",
 +      "Structural Heterogeneity and Quantitative FRET Efficiency Distributions of Polyprolines through a Hybrid Atomistic Simulation and Monte Carlo Approach",
 +      "PLoS ONE",
 +      6, 2011, "e19791" },
 +    { "Hockney1988",
 +      "R. W. Hockney and J. W. Eastwood",
 +      "Computer simulation using particles",
 +      "IOP, Bristol",
 +      1, 1988, "1" },
 +    { "Ballenegger2012",
 +      "V. Ballenegger, J.J. Cerda, and C. Holm",
 +      "How to Convert SPME to P3M: Influence Functions and Error Estimates",
 +      "J. Chem. Theory Comput.",
 +      8, 2012, "936-947" },
 +    { "Garmay2012",
 +      "Garmay Yu, Shvetsov A, Karelov D, Lebedev D, Radulescu A, Petukhov M, Isaev-Ivanov V",
 +      "Correlated motion of protein subdomains and large-scale conformational flexibility of RecA protein filament",
 +      "Journal of Physics: Conference Series",
 +      340, 2012, "012094" }
 +  };
 +#define NSTR (int)asize(citedb)
 +  
 +  int  j,index;
 +  char *author;
 +  char *title;
 +#define LINE_WIDTH 79
 +  
 +  if (fp == NULL)
 +    return;
 +
 +  for(index=0; (index<NSTR) && (strcmp(citedb[index].key,key) != 0); index++)
 +    ;
 +  
 +  fprintf(fp,"\n++++ PLEASE READ AND CITE THE FOLLOWING REFERENCE ++++\n");
 +  if (index < NSTR) {
 +    /* Insert newlines */
 +    author = wrap_lines(citedb[index].author,LINE_WIDTH,0,FALSE);
 +    title  = wrap_lines(citedb[index].title,LINE_WIDTH,0,FALSE);
 +    fprintf(fp,"%s\n%s\n%s %d (%d) pp. %s\n",
 +          author,title,citedb[index].journal,
 +          citedb[index].volume,citedb[index].year,
 +          citedb[index].pages);
 +    sfree(author);
 +    sfree(title);
 +  }
 +  else {
 +    fprintf(fp,"Entry %s not found in citation database\n",key);
 +  }
 +  fprintf(fp,"-------- -------- --- Thank You --- -------- --------\n\n");
 +  fflush(fp);
 +}
 +
#ifdef GMX_GIT_VERSION_INFO
/* Version information generated at compile time. */
#include "gromacs/utility/gitversion.h"
#else
/* Fall back to statically defined version. */
static const char _gmx_ver_string[]="VERSION " VERSION;
#endif

/* Return the GROMACS version string ("VERSION <x.y.z>"); the storage is
 * static, so the caller must not free or modify it. */
const char *GromacsVersion()
{
  return _gmx_ver_string;
}
 +
++void gmx_print_version_info_gpu(FILE *fp);
++
 +void gmx_print_version_info(FILE *fp)
 +{
-     fprintf(fp, "CUDA compiler:      %s\n",CUDA_NVCC_COMPILER_INFO);
-     cuda_driver = 0;
-     cudaDriverGetVersion(&cuda_driver);
-     cuda_runtime = 0;
-     cudaRuntimeGetVersion(&cuda_runtime);
-     fprintf(fp, "CUDA driver:        %d.%d\n",cuda_driver/1000, cuda_driver%100);
-     fprintf(fp, "CUDA runtime:       %d.%d\n",cuda_runtime/1000, cuda_runtime%100);
 +    fprintf(fp, "Gromacs version:    %s\n", _gmx_ver_string);
 +#ifdef GMX_GIT_VERSION_INFO
 +    fprintf(fp, "GIT SHA1 hash:      %s\n", _gmx_full_git_hash);
 +    /* Only print out the branch information if present.
 +     * The generating script checks whether the branch point actually
 +     * coincides with the hash reported above, and produces an empty string
 +     * in such cases. */
 +    if (_gmx_central_base_hash[0] != 0)
 +    {
 +        fprintf(fp, "Branched from:      %s\n", _gmx_central_base_hash);
 +    }
 +#endif
 +
 +#ifdef GMX_DOUBLE
 +    fprintf(fp, "Precision:          double\n");
 +#else
 +    fprintf(fp, "Precision:          single\n");
 +#endif
++    fprintf(fp, "Memory model:       %lu bit\n",8*sizeof(void *));
 +
 +#ifdef GMX_THREAD_MPI
 +    fprintf(fp, "MPI library:        thread_mpi\n");
 +#elif defined(GMX_MPI)
 +    fprintf(fp, "MPI library:        MPI\n");
 +#else
 +    fprintf(fp, "MPI library:        none\n");
 +#endif
 +#ifdef GMX_OPENMP
 +    fprintf(fp, "OpenMP support:     enabled\n");
 +#else
 +    fprintf(fp, "OpenMP support:     disabled\n");
 +#endif
 +#ifdef GMX_GPU
 +    fprintf(fp, "GPU support:        enabled\n");
 +#else
 +    fprintf(fp, "GPU support:        disabled\n");
 +#endif
 +    /* A preprocessor trick to avoid duplicating logic from vec.h */
 +#define gmx_stringify2(x) #x
 +#define gmx_stringify(x) gmx_stringify2(x)
 +    fprintf(fp, "invsqrt routine:    %s\n", gmx_stringify(gmx_invsqrt(x)));
 +    fprintf(fp, "CPU acceleration:   %s\n", GMX_CPU_ACCELERATION_STRING);
 +
 +    /* TODO: Would be nicer to wrap this in a gmx_fft_version() call, but
 +     * since that is currently in mdlib, can wait for master. */
 +#ifdef GMX_FFT_FFTPACK
 +    fprintf(fp, "FFT library:        fftpack (built-in)\n");
 +#elif defined(GMX_FFT_FFTW3) && defined(GMX_NATIVE_WINDOWS)
 +    fprintf(fp, "FFT library:        %s\n", "fftw3");
 +#elif defined(GMX_FFT_FFTW3) && defined(GMX_DOUBLE)
 +    fprintf(fp, "FFT library:        %s\n", fftw_version);
 +#elif defined(GMX_FFT_FFTW3)
 +    fprintf(fp, "FFT library:        %s\n", fftwf_version);
 +#elif defined(GMX_FFT_MKL)
 +    fprintf(fp, "FFT library:        MKL\n");
 +#else
 +    fprintf(fp, "FFT library:        unknown\n");
 +#endif
 +#ifdef GMX_LARGEFILES
 +    fprintf(fp, "Large file support: enabled\n");
 +#else
 +    fprintf(fp, "Large file support: disabled\n");
 +#endif
 +#ifdef HAVE_RDTSCP
 +    fprintf(fp, "RDTSCP usage:       enabled\n");
 +#else
 +    fprintf(fp, "RDTSCP usage:       disabled\n");
 +#endif
 +
 +    fprintf(fp, "Built on:           %s\n", BUILD_TIME);
 +    fprintf(fp, "Built by:           %s\n", BUILD_USER);
 +    fprintf(fp, "Build OS/arch:      %s\n", BUILD_HOST);
 +    fprintf(fp, "Build CPU vendor:   %s\n", BUILD_CPU_VENDOR);
 +    fprintf(fp, "Build CPU brand:    %s\n", BUILD_CPU_BRAND);
 +    fprintf(fp, "Build CPU family:   %d   Model: %d   Stepping: %d\n",
 +            BUILD_CPU_FAMILY, BUILD_CPU_MODEL, BUILD_CPU_STEPPING);
 +    /* TODO: The below strings can be quite long, so it would be nice to wrap
 +     * them. Can wait for later, as the master branch has ready code to do all
 +     * that. */
 +    fprintf(fp, "Build CPU features: %s\n", BUILD_CPU_FEATURES);
 +    fprintf(fp, "C compiler:         %s\n", BUILD_C_COMPILER);
 +    fprintf(fp, "C compiler flags:   %s\n", BUILD_CFLAGS);
 +    if (BUILD_CXX_COMPILER[0] != '\0')
 +    {
 +        fprintf(fp, "C++ compiler:       %s\n", BUILD_CXX_COMPILER);
 +        fprintf(fp, "C++ compiler flags: %s\n", BUILD_CXXFLAGS);
 +    }
 +#ifdef HAVE_LIBMKL
 +    /* MKL might be used for LAPACK/BLAS even if FFTs use FFTW, so keep it separate */
 +    fprintf(fp, "Linked with Intel MKL version %s.%s.%s.\n",
 +            __INTEL_MKL__,__INTEL_MKL_MINOR__,__INTEL_MKL_UPDATE__);
 +#endif
 +#ifdef GMX_GPU
++    gmx_print_version_info_gpu(fp);
 +#endif
 +
 +}
index 0000000000000000000000000000000000000000,001ca53fc2c33920c85610471651c36651dcec01..001ca53fc2c33920c85610471651c36651dcec01
mode 000000,100644..100644
--- /dev/null
index d42f37e11c273556f1423cdc50b5ec6437208b5d,0000000000000000000000000000000000000000..474167a57ec3418347d3e16ae32d2fcb82065bcb
mode 100644,000000..100644
--- /dev/null
@@@ -1,1019 -1,0 +1,1018 @@@
-     { eftASC, ".edo", "sam",    NULL, "ED sampling output"},
 +/*
 + * 
 + *                This source code is part of
 + * 
 + *                 G   R   O   M   A   C   S
 + * 
 + *          GROningen MAchine for Chemical Simulations
 + * 
 + *                        VERSION 3.2.0
 + * Written by David van der Spoel, Erik Lindahl, Berk Hess, and others.
 + * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 + * Copyright (c) 2001-2004, The GROMACS development team,
 + * check out http://www.gromacs.org for more information.
 +
 + * This program is free software; you can redistribute it and/or
 + * modify it under the terms of the GNU General Public License
 + * as published by the Free Software Foundation; either version 2
 + * of the License, or (at your option) any later version.
 + * 
 + * If you want to redistribute modifications, please consider that
 + * scientific software is very special. Version control is crucial -
 + * bugs must be traceable. We will be happy to consider code for
 + * inclusion in the official distribution, but derived work must not
 + * be called official GROMACS. Details are found in the README & COPYING
 + * files - if they are missing, get the official version at www.gromacs.org.
 + * 
 + * To help us fund GROMACS development, we humbly ask that you cite
 + * the papers on the package - you can find them in the top README file.
 + * 
 + * For more info, check our website at http://www.gromacs.org
 + * 
 + * And Hey:
 + * GROningen Mixture of Alchemy and Childrens' Stories
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <string.h>
 +#include "sysstuff.h"
 +#include "typedefs.h"
 +#include "smalloc.h"
 +#include "string2.h"
 +#include "gmx_fatal.h"
 +#include "filenm.h"
 +#include "futil.h"
 +#include "wman.h"
 +#include "xdrf.h"
 +#include "macros.h"
 +
 +#ifdef GMX_THREAD_MPI
 +#include "thread_mpi.h"
 +#endif
 +
/* NOTE: this was a cesspool of thread-unsafe code, has now been 
 properly proteced by mutexes (hopefully). */

/* XDR should be available on all platforms now, 
 * but we keep the possibility of turning it off...
 */
#define USE_XDR

/* Use bitflag ... */
/* Accessors for the ffSET/ffOPT/ffMULT bits in t_filenm.flag; each takes a
 * t_filenm struct (not a pointer) by name. */
#define IS_SET(fn) ((fn.flag & ffSET) != 0)
#define IS_OPT(fn) ((fn.flag & ffOPT) != 0)
#define IS_MULT(fn) ((fn.flag & ffMULT) != 0)
#define UN_SET(fn) (fn.flag = (fn.flag & ~ffSET))
#define DO_SET(fn) (fn.flag = (fn.flag |  ffSET))

/* File contents categories used in the deffile table below:
 * ASCII text, binary, portable XDR, or generic (multiple concrete types). */
enum
{
    eftASC, eftBIN, eftXDR, eftGEN, eftNR
};
 +
/* To support multiple file types with one general (eg TRX) we have 
 * these arrays.
 */
/* Concrete types accepted by the generic TRX (any trajectory) type. */
static const int trxs[] =
    {
#ifdef USE_XDR 
        efXTC, efTRR, efCPT,
#endif
        efTRJ, efGRO, efG96, efPDB, efG87 };
#define NTRXS asize(trxs)

/* TRO: trajectory output (as TRX but without checkpoint files). */
static const int tros[] =
    {
#ifdef USE_XDR 
        efXTC, efTRR,
#endif
        efTRJ, efGRO, efG96, efPDB, efG87 };
#define NTROS asize(tros)

/* TRN: full-precision trajectory. */
static const int trns[] =
    {
#ifdef USE_XDR
        efTRR, efCPT,
#endif
        efTRJ };
#define NTRNS asize(trns)

/* STO: structure output formats. */
static const int stos[] =
    { efGRO, efG96, efPDB, efBRK, efENT, efESP, efXYZ };
#define NSTOS asize(stos)

/* STX: structure input formats (structure output plus run-input files). */
static const int stxs[] =
    { efGRO, efG96, efPDB, efBRK, efENT, efESP, efXYZ,
#ifdef USE_XDR 
        efTPR,
#endif 
        efTPB, efTPA };
#define NSTXS asize(stxs)

/* TPX: run input files. */
static const int tpxs[] =
    {
#ifdef USE_XDR
        efTPR,
#endif
        efTPB, efTPA };
#define NTPXS asize(tpxs)

/* TPS: anything that provides structure plus masses. */
static const int tpss[] =
    {
#ifdef USE_XDR
        efTPR,
#endif
        efTPB, efTPA, efGRO, efG96, efPDB, efBRK, efENT };
#define NTPSS asize(tpss)
 +
/* Static description of one file type: its default extension, default base
 * name, default command-line option, and (for generic types) the list of
 * concrete types it accepts. */
typedef struct
{
    int ftype;          /* Contents category: eftASC/eftBIN/eftXDR/eftGEN */
    const char *ext;    /* Default extension, including the leading dot */
    const char *defnm;  /* Default base file name */
    const char *defopt; /* Default command-line option, or NULL */
    const char *descr;  /* Human-readable description */
    int ntps;           /* Number of entries in tps (0 for concrete types) */
    const int *tps;     /* Accepted concrete types for generic (eftGEN) types */
} t_deffile;
 +
/* this array should correspond to the enum in include/types/filenm.h */
/* NOTE: the position of each entry defines its efXXX value; never reorder,
 * insert or remove entries without making the matching change to that enum. */
static const t_deffile
    deffile[efNR] =
{
    { eftASC, ".mdp", "grompp", "-f", "grompp input file with MD parameters" },
    { eftASC, ".gct", "gct",    "-f", "General coupling stuff"},
    { eftGEN, ".???", "traj", "-f",
      "Trajectory: xtc trr trj gro g96 pdb cpt", NTRXS, trxs },
    { eftGEN, ".???", "trajout", "-f",
      "Trajectory: xtc trr trj gro g96 pdb", NTROS, tros },
    { eftGEN, ".???", "traj", NULL,
      "Full precision trajectory: trr trj cpt", NTRNS, trns },
    { eftXDR, ".trr", "traj", NULL, "Trajectory in portable xdr format" },
    { eftBIN, ".trj", "traj", NULL, "Trajectory file (architecture specific)" },
    { eftXDR, ".xtc", "traj", NULL,
      "Compressed trajectory (portable xdr format)" },
    { eftASC, ".g87", "gtraj", NULL, "Gromos-87 ASCII trajectory format" },
    { eftXDR, ".edr", "ener",   NULL, "Energy file"},
    { eftGEN, ".???", "conf", "-c", "Structure file: gro g96 pdb tpr etc.", 
      NSTXS, stxs },
    { eftGEN, ".???", "out", "-o", "Structure file: gro g96 pdb etc.", 
      NSTOS, stos },
    { eftASC, ".gro", "conf", "-c", "Coordinate file in Gromos-87 format" },
    { eftASC, ".g96", "conf", "-c", "Coordinate file in Gromos-96 format" },
    { eftASC, ".pdb", "eiwit",  "-f", "Protein data bank file"},
    { eftASC, ".brk", "eiwit",  "-f", "Brookhaven data bank file"},
    { eftASC, ".ent", "eiwit", "-f", "Entry in the protein date bank" },
    { eftASC, ".esp", "conf", "-f", "Coordinate file in Espresso format" },
    { eftASC, ".pqr", "state",  "-o", "Coordinate file for MEAD"},
    { eftASC, ".xyz", "conf", "-o", "Coordinate file for some other programs" },
    { eftXDR, ".cpt", "state",  "-cp","Checkpoint file"},
    { eftASC, ".log", "run",    "-l", "Log file"},
    { eftASC, ".xvg", "graph",  "-o", "xvgr/xmgr file"},
    { eftASC, ".out", "hello",  "-o", "Generic output file"},
    { eftASC, ".ndx", "index",  "-n", "Index file",},
    { eftASC, ".top", "topol",  "-p", "Topology file"},
    { eftASC, ".itp", "topinc", NULL, "Include file for topology"},
    { eftGEN, ".???", "topol", "-s", "Run input file: tpr tpb tpa",
      NTPXS, tpxs },
    { eftGEN, ".???", "topol", "-s",
      "Structure+mass(db): tpr tpb tpa gro g96 pdb", NTPSS, tpss },
    { eftXDR, ".tpr", "topol",  "-s", "Portable xdr run input file"},
    { eftASC, ".tpa", "topol",  "-s", "Ascii run input file"},
    { eftBIN, ".tpb", "topol",  "-s", "Binary run input file"},
    { eftASC, ".tex", "doc",    "-o", "LaTeX file"},
    { eftASC, ".rtp", "residue", NULL, "Residue Type file used by pdb2gmx" },
    { eftASC, ".atp", "atomtp", NULL, "Atomtype file used by pdb2gmx" },
    { eftASC, ".hdb", "polar",  NULL, "Hydrogen data base"},
    { eftASC, ".dat", "nnnice", NULL, "Generic data file"},
    { eftASC, ".dlg", "user",   NULL, "Dialog Box data for ngmx"},
    { eftASC, ".map", "ss", NULL, "File that maps matrix data to colors" },
    { eftASC, ".eps", "plot", NULL, "Encapsulated PostScript (tm) file" },
    { eftASC, ".mat", "ss",     NULL, "Matrix Data file"},
    { eftASC, ".m2p", "ps",     NULL, "Input file for mat2ps"},
    { eftXDR, ".mtx", "hessian","-m", "Hessian matrix"},
    { eftASC, ".edi", "sam",    NULL, "ED sampling input"},
    { eftASC, ".hat", "gk", NULL, "Fourier transform of spread function" },
    { eftASC, ".cub", "pot",  NULL, "Gaussian cube file" },
    { eftASC, ".xpm", "root", NULL, "X PixMap compatible matrix file" },
    { eftASC, "", "rundir", NULL, "Run directory" } 
};
 +
/* When non-NULL, overrides the per-type default names (see
 * set_default_file_name); protected by filenm_mutex when threads are used. */
static char *default_file_name = NULL;

#ifdef GMX_THREAD_MPI
static tMPI_Thread_mutex_t filenm_mutex=TMPI_THREAD_MUTEX_INITIALIZER;
#endif

/* Compression suffixes that are stripped from existing input files. */
#define NZEXT 2
const char *z_ext[NZEXT] =
    { ".gz", ".Z" };
 +
 +void set_default_file_name(const char *name)
 +{
 +    int i;
 +#ifdef GMX_THREAD_MPI
 +    tMPI_Thread_mutex_lock(&filenm_mutex);
 +#endif
 +    default_file_name = strdup(name);
 +#ifdef GMX_THREAD_MPI
 +    tMPI_Thread_mutex_unlock(&filenm_mutex);
 +#endif
 +
 +#if 0
 +    for(i=0; i<efNR; i++)
 +    deffile[i].defnm = default_file_name;
 +#endif
 +}
 +
 +const char *ftp2ext(int ftp)
 +{
 +    if ((0 <= ftp) && (ftp < efNR))
 +        return deffile[ftp].ext + 1;
 +    else
 +        return "unknown";
 +}
 +
 +const char *ftp2ext_generic(int ftp)
 +{
 +    if ((0 <= ftp) && (ftp < efNR))
 +    {
 +        switch (ftp)
 +        {
 +        case efTRX:
 +            return "trx";
 +        case efTRN:
 +            return "trn";
 +        case efSTO:
 +            return "sto";
 +        case efSTX:
 +            return "stx";
 +        case efTPX:
 +            return "tpx";
 +        case efTPS:
 +            return "tps";
 +        default:
 +            return ftp2ext(ftp);
 +        }
 +    }
 +    else
 +        return "unknown";
 +}
 +
 +const char *ftp2desc(int ftp)
 +{
 +    if ((0 <= ftp) && (ftp < efNR))
 +        return deffile[ftp].descr;
 +    else
 +        return "unknown filetype";
 +}
 +
 +const char *ftp2ftype(int ftp)
 +{
 +    if ((ftp >= 0) && (ftp < efNR))
 +    {
 +        switch (deffile[ftp].ftype)
 +        {
 +        case eftASC:
 +            return "ASCII";
 +        case eftBIN:
 +            return "Binary";
 +        case eftXDR:
 +            return "XDR portable";
 +        case eftGEN:
 +            return "";
 +        default:
 +            gmx_fatal(FARGS, "Unknown filetype %d in ftp2ftype",deffile[ftp].ftype);
 +            break;
 +        }
 +    }
 +    return "unknown";
 +}
 +
 +const char *ftp2defnm(int ftp)
 +{
 +    const char *buf = NULL;
 +
 +#ifdef GMX_THREAD_MPI
 +    tMPI_Thread_mutex_lock(&filenm_mutex);
 +#endif
 +
 +    if (default_file_name)
 +    {
 +        buf = default_file_name;
 +    }
 +    else
 +    {
 +        if ((0 <= ftp) && (ftp < efNR))
 +        {
 +            buf = deffile[ftp].defnm;
 +        }
 +    }
 +#ifdef GMX_THREAD_MPI
 +    tMPI_Thread_mutex_unlock(&filenm_mutex);
 +#endif
 +
 +    return buf;
 +}
 +
 +void pr_def(FILE *fp, int ftp)
 +{
 +    const t_deffile *df;
 +    const char *s = NULL;
 +    char *flst, *tmp, *desc;
 +    const char *ext;
 +    const char *defnm;
 +
 +    df = &(deffile[ftp]);
 +    defnm = ftp2defnm(ftp);
 +    /* find default file extension and \tt-ify description */
 +    /* FIXME: The constness should not be cast away */
 +    flst = (char *) "";
 +    desc = strdup(df->descr);
 +
 +    if (df->ntps)
 +    {
 +        ext = deffile[df->tps[0]].ext;
 +        tmp = strstr(desc, ": ") + 1;
 +        if (tmp)
 +        {
 +            tmp[0] = '\0';
 +            tmp++;
 +            snew(flst,strlen(tmp)+6);
 +            strcpy(flst, " \\tt ");
 +            strcat(flst, tmp);
 +        }
 +    }
 +    else
 +    {
 +        ext = df->ext;
 +    }
 +    /* now skip dot */
 +    if (ext[0])
 +        ext++;
 +    else
 +        ext = "";
 +    /* set file contents type */
 +    switch (df->ftype)
 +    {
 +    case eftASC:
 +        s = "Asc";
 +        break;
 +    case eftBIN:
 +        s = "Bin";
 +        break;
 +    case eftXDR:
 +        s = "xdr";
 +        break;
 +    case eftGEN:
 +        s = "";
 +        break;
 +    default:
 +        gmx_fatal(FARGS, "Unimplemented filetype %d %d",ftp,
 +        df->ftype);
 +    }
 +    fprintf(fp,"\\tt %8s & \\tt %3s & %3s & \\tt %2s & %s%s \\\\[-0.1ex]\n",
 +        defnm, ext, s, df->defopt ? df->defopt : "",
 +        check_tex(desc),check_tex(flst));
 +    free(desc);
 +}
 +
/* Print a formatted table of the nf file options in tfn to fp: option,
 * file name(s), type flags, and a wrapped description per entry.
 * NOTE(review): buf is 256 bytes and receives sprintf/strcat of file names
 * and descriptions with no bounds check -- assumes they fit; confirm. */
void pr_fns(FILE *fp, int nf, const t_filenm tfn[])
{
    int i, f;
    size_t j;
    char buf[256], *wbuf, opt_buf[32];
#define OPTLEN 4
#define NAMELEN 14
    fprintf(fp, "%6s %12s  %-12s %s\n", "Option", "Filename", "Type",
            "Description");
    fprintf(fp,
            "------------------------------------------------------------\n");
    for (i = 0; (i < nf); i++)
    {
        /* Print every file of the entry; the last one stays in buf so the
         * description can be appended to it below. */
        for (f = 0; (f < tfn[i].nfiles); f++)
        {
            sprintf(buf, "%4s %14s  %-12s ", (f == 0) ? tfn[i].opt : "",
                    tfn[i].fns[f], (f == 0) ? fileopt(tfn[i].flag, opt_buf, 32)
                        : "");
            if (f < tfn[i].nfiles - 1)
                fprintf(fp, "%s\n", buf);
        }
        if (tfn[i].nfiles > 0)
        {
            strcat(buf, deffile[tfn[i].ftp].descr);
            /* If the option name overflows its 4-char column but the file
             * name column has slack, shift the line left to compensate. */
            if ((strlen(tfn[i].opt) > OPTLEN)
                && (strlen(tfn[i].opt) <= ((OPTLEN + NAMELEN)
                    - strlen(tfn[i].fns[tfn[i].nfiles - 1]))))
            {
                for (j = strlen(tfn[i].opt); j < strlen(buf)
                    - (strlen(tfn[i].opt) - OPTLEN) + 1; j++)
                    buf[j] = buf[j + strlen(tfn[i].opt) - OPTLEN];
            }
            /* Wrap at 78 columns, continuation lines indented by 35. */
            wbuf = wrap_lines(buf, 78, 35, FALSE);
            fprintf(fp, "%s\n", wbuf);
            sfree(wbuf);
        }
    }
    fprintf(fp, "\n");
    fflush(fp);
}
 +
/* Emit shell-completion fragments for the nf file options in tfn to fp,
 * in csh, bash or zsh syntax depending on shell. For each option the
 * accepted extensions (all concrete types for generic entries) plus the
 * compressed variants from z_ext are listed. The emitted strings are part
 * of the generated completion scripts -- do not alter their exact form. */
void pr_fopts(FILE *fp, int nf, const t_filenm tfn[], int shell)
{
    int i, j;

    switch (shell)
    {
    case eshellCSH:
        for (i = 0; (i < nf); i++)
        {
            fprintf(fp, " \"n/%s/f:*.", tfn[i].opt);
            if (deffile[tfn[i].ftp].ntps)
            {
                /* Generic type: brace-list every accepted extension. */
                fprintf(fp, "{");
                for (j = 0; j < deffile[tfn[i].ftp].ntps; j++)
                {
                    if (j > 0)
                        fprintf(fp, ",");
                    fprintf(fp, "%s", deffile[deffile[tfn[i].ftp].tps[j]].ext
                            + 1);
                }
                fprintf(fp, "}");
            }
            else
                fprintf(fp, "%s", deffile[tfn[i].ftp].ext + 1);
            fprintf(fp, "{");
            for (j = 0; j < NZEXT; j++)
                fprintf(fp, ",%s", z_ext[j]);
            fprintf(fp, "}/\"");
        }
        break;
    case eshellBASH:
        for (i = 0; (i < nf); i++)
        {
            fprintf(fp, "%s) COMPREPLY=( $(compgen -X '!*.", tfn[i].opt);
            if (deffile[tfn[i].ftp].ntps)
            {
                /* Generic type: extended glob alternation of extensions. */
                fprintf(fp, "+(");
                for (j = 0; j < deffile[tfn[i].ftp].ntps; j++)
                {
                    if (j > 0)
                        fprintf(fp, "|");
                    fprintf(fp, "%s", deffile[deffile[tfn[i].ftp].tps[j]].ext
                            + 1);
                }
                fprintf(fp, ")");
            }
            else
                fprintf(fp, "%s", deffile[tfn[i].ftp].ext + 1);
            fprintf(fp, "*(");
            for (j = 0; j < NZEXT; j++)
            {
                if (j > 0)
                    fprintf(fp, "|");
                fprintf(fp, "%s", z_ext[j]);
            }
            fprintf(fp, ")' -f $c ; compgen -S '/' -X '.*' -d $c ));;\n");
        }
        break;
    case eshellZSH:
        for (i = 0; (i < nf); i++)
        {
            fprintf(fp, "- 'c[-1,%s]' -g '*.", tfn[i].opt);
            if (deffile[tfn[i].ftp].ntps)
            {
                /* Generic type: parenthesized glob of extensions. */
                fprintf(fp, "(");
                for (j = 0; j < deffile[tfn[i].ftp].ntps; j++)
                {
                    if (j > 0)
                        fprintf(fp, "|");
                    fprintf(fp, "%s", deffile[deffile[tfn[i].ftp].tps[j]].ext
                            + 1);
                }
                fprintf(fp, ")");
            }
            else
                fprintf(fp, "%s", deffile[tfn[i].ftp].ext + 1);
            fprintf(fp, "(");
            for (j = 0; j < NZEXT; j++)
                fprintf(fp, "|%s", z_ext[j]);
            fprintf(fp, ") *(/)' ");
        }
        break;
    }
}
 +
 +static void check_opts(int nf, t_filenm fnm[])
 +{
 +    int i;
 +    const t_deffile *df;
 +
 +    for (i = 0; (i < nf); i++)
 +    {
 +        df = &(deffile[fnm[i].ftp]);
 +        if (fnm[i].opt == NULL)
 +        {
 +            if (df->defopt == NULL)
 +            {
 +                gmx_fatal(FARGS, "No default cmd-line option for %s (type %d)\n",
 +                          deffile[fnm[i].ftp].ext,fnm[i].ftp);
 +            }
 +            else
 +            {
 +                fnm[i].opt=df->defopt;
 +            }
 +        }
 +    }
 +}
 +
 +int fn2ftp(const char *fn)
 +{
 +    int i, len;
 +    const char *feptr;
 +    const char *eptr;
 +
 +    if (!fn)
 +        return efNR;
 +
 +    len = strlen(fn);
 +    if ((len >= 4) && (fn[len - 4] == '.'))
 +        feptr = &(fn[len - 4]);
 +    else
 +        return efNR;
 +
 +    for (i = 0; (i < efNR); i++)
 +        if ((eptr = deffile[i].ext) != NULL)
 +            if (gmx_strcasecmp(feptr, eptr) == 0)
 +                break;
 +
 +    return i;
 +}
 +
 +static void set_extension(char *buf, int ftp)
 +{
 +    int len, extlen;
 +    const t_deffile *df;
 +
 +    /* check if extension is already at end of filename */
 +    df = &(deffile[ftp]);
 +    len = strlen(buf);
 +    extlen = strlen(df->ext);
 +    if ((len <= extlen) || (gmx_strcasecmp(&(buf[len - extlen]), df->ext) != 0))
 +        strcat(buf, df->ext);
 +}
 +
 +static void add_filenm(t_filenm *fnm, const char *filenm)
 +{
 +    srenew(fnm->fns, fnm->nfiles+1);
 +    fnm->fns[fnm->nfiles] = strdup(filenm);
 +    fnm->nfiles++;
 +}
 +
/* Resolve the file name for a generic-type entry (one accepting several
 * concrete types): validate a supplied name's extension, for input files
 * search the directory for an existing file with any accepted extension,
 * and otherwise fall back to the first accepted extension.
 * NOTE(review): strcpy into 256-byte local buffers assumes name and the
 * defaults fit -- no bounds check; confirm callers keep names short. */
static void set_grpfnm(t_filenm *fnm, const char *name, gmx_bool bCanNotOverride)
{
    char buf[256], buf2[256];
    int i, type;
    gmx_bool bValidExt;
    int nopts;
    const int *ftps;

    nopts = deffile[fnm->ftp].ntps;
    ftps = deffile[fnm->ftp].tps;
    if ((nopts == 0) || (ftps == NULL))
        gmx_fatal(FARGS, "nopts == 0 || ftps == NULL");

    bValidExt = FALSE;
    if (name && (bCanNotOverride || (default_file_name == NULL)))
    {
        strcpy(buf,name);
        /* First check whether we have a valid filename already */
        type = fn2ftp(name);
        if ((fnm->flag & ffREAD) && (fnm->ftp == efTRX))
        {
            /*if file exist don't add an extension for trajectory reading*/
            bValidExt = gmx_fexist(name); 
        }
        for(i=0; (i<nopts) && !bValidExt; i++)
        {
            if (type == ftps[i])
            {
                bValidExt = TRUE;
            }
        }
    }
    else
        /* No name given, set the default name */
        strcpy(buf,ftp2defnm(fnm->ftp));

    if (!bValidExt && (fnm->flag & ffREAD))
    {
        /* for input-files only: search for filenames in the directory */
        for(i=0; (i<nopts) && !bValidExt; i++)
        {
            type = ftps[i];
            strcpy(buf2,buf);
            set_extension(buf2,type);
            if (gmx_fexist(buf2))
            {
                bValidExt = TRUE;
                strcpy(buf,buf2);
            }
        }
    }

    if (!bValidExt)
    {
        /* Use the first extension type */
        set_extension(buf,ftps[0]);
    }

    add_filenm(fnm, buf);
}
 +
static void set_filenm(t_filenm *fnm, const char *name, gmx_bool bCanNotOverride,
                       gmx_bool bReadNode)
{
    /* Set the default filename, extension and option for those fields that 
     * are not already set. An extension is added if not present, if fn = NULL
     * or empty, the default filename is given.
     */
    char buf[256];
    int i, len, extlen;

    /* On non-reading nodes input files are left unset entirely. */
    if ((fnm->flag & ffREAD) && !bReadNode)
    {
        return;
    }

    if ((fnm->ftp < 0) || (fnm->ftp >= efNR))
        gmx_fatal(FARGS, "file type out of range (%d)",fnm->ftp);

    /* NOTE(review): strcpy into buf[256] assumes name fits; no bounds
     * check -- confirm callers. */
    if (name)
        strcpy(buf, name);
    if ((fnm->flag & ffREAD) && name && gmx_fexist(name))
    {
        /* check if filename ends in .gz or .Z, if so remove that: */
        len = strlen(name);
        for (i=0; i<NZEXT; i++)
        {
            extlen = strlen(z_ext[i]);
            if (len > extlen)
            {
                if (gmx_strcasecmp(name+len-extlen,z_ext[i]) == 0)
                {
                    buf[len-extlen]='\0';
                    break;
                }
            }
        }
    }

    if (deffile[fnm->ftp].ntps)
    {
        /* Generic types get special multi-extension handling. */
        set_grpfnm(fnm,name ? buf : NULL,bCanNotOverride);
    }
    else
    {
        /* Use the default base name unless the caller-supplied name is
         * allowed to stand. */
        if ((name == NULL) || !(bCanNotOverride || (default_file_name ==NULL)))
        {
            const char *defnm=ftp2defnm(fnm->ftp);
            strcpy(buf,defnm);
        }
        set_extension(buf,fnm->ftp);
        
        add_filenm(fnm, buf);
    }
}
 +
 +static void set_filenms(int nf, t_filenm fnm[], gmx_bool bReadNode)
 +{
 +    int i;
 +
 +    for (i = 0; (i < nf); i++)
 +        if (!IS_SET(fnm[i]))
 +            set_filenm(&(fnm[i]), fnm[i].fn, FALSE, bReadNode);
 +}
 +
/* Scan argv for the nf file options in fnm, record the supplied file names
 * (or defaults), and -- unless bKeep -- compact argv to remove the consumed
 * arguments, updating *argc. bReadNode controls whether input files are
 * resolved on this node (see set_filenm). */
void parse_file_args(int *argc, char *argv[], int nf, t_filenm fnm[],
                     gmx_bool bKeep, gmx_bool bReadNode)
{
    int i, j;
    gmx_bool *bRemove;

    check_opts(nf, fnm);

    for (i = 0; (i < nf); i++)
        UN_SET(fnm[i]);

    if (*argc > 1)
    {
        /* bRemove[i] marks argv entries consumed by a file option. */
        snew(bRemove,(*argc)+1);
        i = 1;
        do
        {
            for (j = 0; (j < nf); j++)
            {
                if (strcmp(argv[i], fnm[j].opt) == 0)
                {
                    DO_SET(fnm[j]);
                    bRemove[i] = TRUE;
                    i++;
                    /* check if we are out of arguments for this option */
                    if ((i >= *argc) || (argv[i][0] == '-'))
                        set_filenm(&fnm[j], fnm[j].fn, FALSE, bReadNode);
                    /* sweep up all file arguments for this option */
                    while ((i < *argc) && (argv[i][0] != '-'))
                    {
                        set_filenm(&fnm[j], argv[i], TRUE, bReadNode);
                        bRemove[i] = TRUE;
                        i++;
                        /* only repeat for 'multiple' file options: */
                        if (!IS_MULT(fnm[j]))
                            break;
                    }

                    break; /* jump out of 'j' loop */
                }
            }
            /* No file found corresponding to option argv[i] */
            if (j == nf)
                i++;
        } while (i < *argc);

        if (!bKeep)
        {
            /* Remove used entries; the loop runs to *argc inclusive so the
             * terminating NULL pointer is copied as well. */
            for (i = j = 0; (i <= *argc); i++)
            {
                if (!bRemove[i])
                    argv[j++] = argv[i];
            }
            (*argc) = j - 1;
        }
        sfree(bRemove);
    }

    /* Anything still unset falls back to its default name. */
    set_filenms(nf, fnm, bReadNode);

}
 +
 +const char *opt2fn(const char *opt, int nfile, const t_filenm fnm[])
 +{
 +    int i;
 +
 +    for (i = 0; (i < nfile); i++)
 +        if (strcmp(opt, fnm[i].opt) == 0)
 +        {
 +            return fnm[i].fns[0];
 +        }
 +
 +    fprintf(stderr, "No option %s\n", opt);
 +
 +    return NULL;
 +}
 +
 +const char *opt2fn_master(const char *opt, int nfile, const t_filenm fnm[],
 +                          t_commrec *cr)
 +{
 +    return SIMMASTER(cr) ? opt2fn(opt, nfile, fnm) : NULL;
 +}
 +
 +int opt2fns(char **fns[], const char *opt, int nfile, const t_filenm fnm[])
 +{
 +    int i;
 +
 +    for (i = 0; (i < nfile); i++)
 +        if (strcmp(opt, fnm[i].opt) == 0)
 +        {
 +            *fns = fnm[i].fns;
 +            return fnm[i].nfiles;
 +        }
 +
 +    fprintf(stderr, "No option %s\n", opt);
 +    return 0;
 +}
 +
 +const char *ftp2fn(int ftp, int nfile, const t_filenm fnm[])
 +{
 +    int i;
 +
 +    for (i = 0; (i < nfile); i++)
 +        if (ftp == fnm[i].ftp)
 +            return fnm[i].fns[0];
 +
 +    fprintf(stderr, "ftp2fn: No filetype %s\n", deffile[ftp].ext);
 +    return NULL;
 +}
 +
 +int ftp2fns(char **fns[], int ftp, int nfile, const t_filenm fnm[])
 +{
 +    int i;
 +
 +    for (i = 0; (i < nfile); i++)
 +        if (ftp == fnm[i].ftp)
 +        {
 +            *fns = fnm[i].fns;
 +            return fnm[i].nfiles;
 +        }
 +
 +    fprintf(stderr, "ftp2fn: No filetype %s\n", deffile[ftp].ext);
 +    return 0;
 +}
 +
 +gmx_bool ftp2bSet(int ftp, int nfile, const t_filenm fnm[])
 +{
 +    int i;
 +
 +    for (i = 0; (i < nfile); i++)
 +        if (ftp == fnm[i].ftp)
 +            return (gmx_bool) IS_SET(fnm[i]);
 +
 +    fprintf(stderr, "ftp2bSet: No filetype %s\n", deffile[ftp].ext);
 +
 +    return FALSE;
 +}
 +
 +gmx_bool opt2bSet(const char *opt, int nfile, const t_filenm fnm[])
 +{
 +    int i;
 +
 +    for (i = 0; (i < nfile); i++)
 +        if (strcmp(opt, fnm[i].opt) == 0)
 +            return (gmx_bool) IS_SET(fnm[i]);
 +
 +    fprintf(stderr, "No option %s\n", opt);
 +
 +    return FALSE;
 +}
 +
 +const char *opt2fn_null(const char *opt, int nfile, const t_filenm fnm[])
 +{
 +    int i;
 +
 +    for (i = 0; (i < nfile); i++)
 +        if (strcmp(opt, fnm[i].opt) == 0)
 +        {
 +            if (IS_OPT(fnm[i]) && !IS_SET(fnm[i]))
 +                return NULL;
 +            else
 +                return fnm[i].fns[0];
 +        }
 +    fprintf(stderr, "No option %s\n", opt);
 +    return NULL;
 +}
 +
 +const char *ftp2fn_null(int ftp, int nfile, const t_filenm fnm[])
 +{
 +    int i;
 +
 +    for (i = 0; (i < nfile); i++)
 +        if (ftp == fnm[i].ftp)
 +        {
 +            if (IS_OPT(fnm[i]) && !IS_SET(fnm[i]))
 +                return NULL;
 +            else
 +                return fnm[i].fns[0];
 +        }
 +    fprintf(stderr, "ftp2fn: No filetype %s\n", deffile[ftp].ext);
 +    return NULL;
 +}
 +
 +#if 0
 +static void add_filters(char *filter,int *n,int nf,const int ftp[])
 +{
 +    char buf[8];
 +    int i;
 +
 +    sprintf(filter,"*.{");
 +    for(i=0; (i<nf); i++)
 +    {
 +        sprintf(buf,"%s",ftp2ext(ftp[i]));
 +        if (*n > 0)
 +            strcat(filter,",");
 +        strcat(filter,buf);
 +        (*n) ++;
 +    }
 +    strcat(filter,"}");
 +}
 +
 +char *ftp2filter(int ftp)
 +{
 +    int n;
 +    static char filter[128];
 +
 +    filter[0] = '\0';
 +    n = 0;
 +    switch (ftp)
 +    {
 +    case efTRX:
 +        add_filters(filter,&n,NTRXS,trxs);
 +        break;
 +    case efTRN:
 +        add_filters(filter,&n,NTRNS,trns);
 +        break;
 +    case efSTO:
 +        add_filters(filter,&n,NSTOS,stos);
 +        break;
 +    case efSTX:
 +        add_filters(filter,&n,NSTXS,stxs);
 +        break;
 +    case efTPX:
 +        add_filters(filter,&n,NTPXS,tpxs);
 +        break;
 +    default:
 +        sprintf(filter,"*%s",ftp2ext(ftp));
 +        break;
 +    }
 +    return filter;
 +}
 +#endif
 +
 +gmx_bool is_optional(const t_filenm *fnm)
 +{
 +    return ((fnm->flag & ffOPT) == ffOPT);
 +}
 +
 +gmx_bool is_output(const t_filenm *fnm)
 +{
 +    return ((fnm->flag & ffWRITE) == ffWRITE);
 +}
 +
 +gmx_bool is_set(const t_filenm *fnm)
 +{
 +    return ((fnm->flag & ffSET) == ffSET);
 +}
 +
 +int add_suffix_to_output_names(t_filenm *fnm, int nfile, const char *suffix)
 +{
 +    int i, j, pos;
 +    char buf[STRLEN], newname[STRLEN];
 +    char *extpos;
 +
 +    for (i = 0; i < nfile; i++)
 +    {
 +        if (is_output(&fnm[i]) && fnm[i].ftp != efCPT)
 +        {
 +            /* We never use multiple _outputs_, but we might as well check 
 +             for it, just in case... */
 +            for (j = 0; j < fnm[i].nfiles; j++)
 +            {
 +                strncpy(buf, fnm[i].fns[j], STRLEN - 1);
 +                extpos = strrchr(buf, '.');
 +                *extpos = '\0';
 +                sprintf(newname, "%s%s.%s", buf, suffix, extpos + 1);
 +                free(fnm[i].fns[j]);
 +                fnm[i].fns[j] = strdup(newname);
 +            }
 +        }
 +    }
 +    return 0;
 +}
 +
 +t_filenm *dup_tfn(int nf, const t_filenm tfn[])
 +{
 +    int i, j;
 +    t_filenm *ret;
 +
 +    snew(ret, nf);
 +    for (i = 0; i < nf; i++)
 +    {
 +        ret[i] = tfn[i]; /* just directly copy all non-string fields */
 +        if (tfn[i].opt)
 +            ret[i].opt = strdup(tfn[i].opt);
 +        else
 +            ret[i].opt = NULL;
 +
 +        if (tfn[i].fn)
 +            ret[i].fn = strdup(tfn[i].fn);
 +        else
 +            ret[i].fn = NULL;
 +
 +        if (tfn[i].nfiles > 0)
 +        {
 +            snew(ret[i].fns,tfn[i].nfiles);
 +            for (j = 0; j < tfn[i].nfiles; j++)
 +            {
 +                ret[i].fns[j] = strdup(tfn[i].fns[j]);
 +            }
 +        }
 +    }
 +    return ret;
 +}
 +
 +void done_filenms(int nf, t_filenm fnm[])
 +{
 +    int i, j;
 +
 +    for (i = 0; i < nf; ++i)
 +    {
 +        for (j = 0; j < fnm[i].nfiles; ++j)
 +        {
 +            sfree(fnm[i].fns[j]);
 +        }
 +        sfree(fnm[i].fns);
 +        fnm[i].fns = NULL;
 +    }
 +}
index 71bf75ac7e6ba34671cdc5d131c79961390021c4,0000000000000000000000000000000000000000..dfe5c298e5192f93b39981739aef1e43e6b04783
mode 100644,000000..100644
--- /dev/null
@@@ -1,859 -1,0 +1,1066 @@@
- #include "gmx_cpuid.h"
 +/* -*- mode: c; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4; c-file-style: "stroustrup"; -*-
 + *
 + * 
 + * This file is part of GROMACS.
 + * Copyright (c) 2012-  
 + *
 + * Written by the Gromacs development team under coordination of
 + * David van der Spoel, Berk Hess, and Erik Lindahl.
 + *
 + * This library is free software; you can redistribute it and/or
 + * modify it under the terms of the GNU Lesser General Public License
 + * as published by the Free Software Foundation; either version 2
 + * of the License, or (at your option) any later version.
 + *
 + * To help us fund GROMACS development, we humbly ask that you cite
 + * the research papers on the package. Check out http://www.gromacs.org
 + * 
 + * And Hey:
 + * Gnomes, ROck Monsters And Chili Sauce
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#ifdef HAVE_SCHED_H
 +#define _GNU_SOURCE
 +#include <sched.h>
 +#endif
 +
 +#include <stdio.h>
 +#include <stdlib.h>
 +#include <string.h>
 +#include <ctype.h>
 +#ifdef _MSC_VER
 +/* MSVC definition for __cpuid() */
 +#include <intrin.h>
++/* sysinfo functions */
++#include <windows.h>
 +#endif
 +#ifdef HAVE_UNISTD_H
 +/* sysconf() definition */
 +#include <unistd.h>
 +#endif
 +
++#include "gmx_cpuid.h"
 +
 +
 +
- /* Currently CPUID is only supported (1) if we can use an instruction on MSVC, or (2)
-  * if the compiler handles GNU-style inline assembly.
-  */
- #if defined (__i386__) || defined (__x86_64__) || defined (_M_IX86) || defined (_M_X64)
++/* For convenience, and to enable configure-time invocation, we keep all architectures
++ * in a single file, but to avoid repeated ifdefs we set the overall architecture here.
++ */
++#if defined (__i386__) || defined (__x86_64__) || defined (_M_IX86) || defined (_M_X64)
++#    define GMX_CPUID_X86
++#endif
 +
 +/* Global constant character strings corresponding to our enumerated types */
 +const char *
 +gmx_cpuid_vendor_string[GMX_CPUID_NVENDORS] =
 +{
 +    "CannotDetect",
 +    "Unknown",
 +    "GenuineIntel",
 +    "AuthenticAMD"
 +};
 +
 +const char *
 +gmx_cpuid_feature_string[GMX_CPUID_NFEATURES] =
 +{
 +    "CannotDetect",
 +    "aes",
 +    "apic",
 +    "avx",
 +    "avx2",
 +    "clfsh",
 +    "cmov",
 +    "cx8",
 +    "cx16",
 +    "f16c",
 +    "fma",
 +    "fma4",
 +    "htt",
 +    "lahf_lm",
 +    "misalignsse",
 +    "mmx",
 +    "msr",
 +    "nonstop_tsc",
 +    "pcid",
 +    "pclmuldq",
 +    "pdcm",
 +    "pdpe1gb",
 +    "popcnt",
 +    "pse",
 +    "rdrnd",
 +    "rdtscp",
 +    "sse2",
 +    "sse3",
 +    "sse4a",
 +    "sse4.1",
 +    "sse4.2",
 +    "ssse3",
 +    "tdt",
 +    "x2apic",
 +    "xop"
 +};
 +
 +const char *
 +gmx_cpuid_acceleration_string[GMX_CPUID_NACCELERATIONS] =
 +{
 +    "CannotDetect",
 +    "None",
 +    "SSE2",
 +    "SSE4.1",
 +    "AVX_128_FMA",
 +    "AVX_256"
 +};
 +
 +/* Max length of brand string */
 +#define GMX_CPUID_BRAND_MAXLEN 256
 +
 +
 +/* Contents of the abstract datatype */
 +struct gmx_cpuid
 +{
 +    enum gmx_cpuid_vendor      vendor;
 +    char                       brand[GMX_CPUID_BRAND_MAXLEN];
 +    int                        family;
 +    int                        model;
 +    int                        stepping;
 +    /* Not using gmx_bool here, since this file must be possible to compile without simple.h */
 +    char                       feature[GMX_CPUID_NFEATURES];
++    
++    /* Basic CPU topology information. For x86 this is a bit complicated since the topology differs between
++     * operating systems and sometimes even settings. For most other architectures you can likely just check
++     * the documentation and then write static information to these arrays rather than detecting on-the-fly.
++     */
++    int                        have_cpu_topology;
++    int                        nproc;               /* total number of logical processors from OS */
++    int                        npackages;
++    int                        ncores_per_package;
++    int                        nhwthreads_per_core;
++    int *                      package_id;
++    int *                      core_id;             /* Local core id in each package */
++    int *                      hwthread_id;         /* Local hwthread id in each core */
++    int *                      locality_order;      /* Processor indices sorted in locality order */
 +};
 +
 +
 +/* Simple routines to access the data structure. The initialization routine is
 + * further down since that needs to call other static routines in this file.
 + */
 +enum gmx_cpuid_vendor
 +gmx_cpuid_vendor            (gmx_cpuid_t                cpuid)
 +{
 +    return cpuid->vendor;
 +}
 +
 +
 +const char *
 +gmx_cpuid_brand             (gmx_cpuid_t                cpuid)
 +{
 +    return cpuid->brand;
 +}
 +
 +int
 +gmx_cpuid_family            (gmx_cpuid_t                cpuid)
 +{
 +    return cpuid->family;
 +}
 +
 +int
 +gmx_cpuid_model             (gmx_cpuid_t                cpuid)
 +{
 +    return cpuid->model;
 +}
 +
 +int
 +gmx_cpuid_stepping          (gmx_cpuid_t                cpuid)
 +{
 +    return cpuid->stepping;
 +}
 +
 +int
 +gmx_cpuid_feature           (gmx_cpuid_t                cpuid,
 +                             enum gmx_cpuid_feature     feature)
 +{
 +    return (cpuid->feature[feature]!=0);
 +}
 +
 +
 +
 +
 +/* What type of acceleration was compiled in, if any?
 + * This is set from CMake. Note that the SSE2 and SSE4_1 macros are set for
 + * AVX too, so it is important that they appear last in the list.
 + */
 +#ifdef GMX_X86_AVX_256
 +static const
 +enum gmx_cpuid_acceleration
 +compiled_acc = GMX_CPUID_ACCELERATION_X86_AVX_256;
 +#elif defined GMX_X86_AVX_128_FMA
 +static const
 +enum gmx_cpuid_acceleration
 +compiled_acc = GMX_CPUID_ACCELERATION_X86_AVX_128_FMA;
 +#elif defined GMX_X86_SSE4_1
 +static const
 +enum gmx_cpuid_acceleration
 +compiled_acc = GMX_CPUID_ACCELERATION_X86_SSE4_1;
 +#elif defined GMX_X86_SSE2
 +static const
 +enum gmx_cpuid_acceleration
 +compiled_acc = GMX_CPUID_ACCELERATION_X86_SSE2;
 +#else
 +static const
 +enum gmx_cpuid_acceleration
 +compiled_acc = GMX_CPUID_ACCELERATION_NONE;
 +#endif
 +
 +
- #endif /* architecture is x86 */
++#ifdef GMX_CPUID_X86
 +
 +/* Execute CPUID on x86 class CPUs. level sets function to exec, and the
 + * contents of register output is returned. See Intel/AMD docs for details.
 + *
 + * This version supports extended information where we can also have an input
 + * value in the ecx register. This is ignored for most levels, but some of them
 + * (e.g. level 0xB on Intel) use it.
 + */
 +static int
 +execute_x86cpuid(unsigned int   level,
 +                 unsigned int   ecxval,
 +                 unsigned int * eax,
 +                 unsigned int * ebx,
 +                 unsigned int * ecx,
 +                 unsigned int * edx)
 +{
 +    int rc = 0;
 +
++    /* Currently CPUID is only supported (1) if we can use an instruction on MSVC, or (2)
++     * if the compiler handles GNU-style inline assembly.
++     */
++
 +#if (defined _MSC_VER)
 +    int CPUInfo[4];
 +
 +#if (_MSC_VER > 1500) || (_MSC_VER==1500 & _MSC_FULL_VER >= 150030729)
 +    /* MSVC 9.0 SP1 or later */
 +    __cpuidex(CPUInfo,level,ecxval);
 +    rc = 0;
 +#else
 +    __cpuid(CPUInfo,level);
 +    /* Set an error code if the user wanted a non-zero ecxval, since we did not have cpuidex */
 +    rc = (ecxval>0) ? -1 : 0;
 +#endif
 +    *eax=CPUInfo[0];
 +    *ebx=CPUInfo[1];
 +    *ecx=CPUInfo[2];
 +    *edx=CPUInfo[3];
 +
 +#elif (defined GMX_X86_GCC_INLINE_ASM)
 +    /* for now this means GMX_X86_GCC_INLINE_ASM should be defined,
 +     * but there might be more options added in the future.
 +     */
 +    *eax = level;
 +    *ecx = ecxval;
 +    *ebx = 0;
 +    *edx = 0;
 +#if defined(__i386__) && defined(__PIC__)
 +    /* Avoid clobbering the global offset table in 32-bit pic code (ebx register) */
 +    __asm__ __volatile__ ("xchgl %%ebx, %1  \n\t"
 +                          "cpuid            \n\t"
 +                          "xchgl %%ebx, %1  \n\t"
 +                          : "+a"(*eax), "+r"(*ebx), "+c"(*ecx), "+d"(*edx));
 +#else
 +    /* i386 without PIC, or x86-64. Things are easy and we can clobber any reg we want :-) */
 +    __asm__ __volatile__ ("cpuid            \n\t"
 +                          : "+a"(*eax), "+b"(*ebx), "+c"(*ecx), "+d"(*edx));
 +#endif
 +    rc = 0;
 +#else
 +    /* Death and horror!
 +     * Apparently this is an x86 platform where we don't know how to call cpuid.
 +     *
 +     * This is REALLY bad, since we will lose all Gromacs acceleration.
 +     */
 +    *eax = 0;
 +    *ebx = 0;
 +    *ecx = 0;
 +    *edx = 0;
 +
 +    rc = -1;
 +#endif
 +    return rc;
 +}
 +
 +
 +/* Identify CPU features common to Intel & AMD - mainly brand string,
 + * version and some features. Vendor has already been detected outside this.
 + */
 +static int
 +cpuid_check_common_x86(gmx_cpuid_t                cpuid)
 +{
 +    int                       fn,max_stdfn,max_extfn;
 +    unsigned int              eax,ebx,ecx,edx;
 +    char                      str[GMX_CPUID_BRAND_MAXLEN];
 +    char *                    p;
 +
 +    /* Find largest standard/extended function input value */
 +    execute_x86cpuid(0x0,0,&eax,&ebx,&ecx,&edx);
 +    max_stdfn = eax;
 +    execute_x86cpuid(0x80000000,0,&eax,&ebx,&ecx,&edx);
 +    max_extfn = eax;
 +
 +    p = str;
 +    if(max_extfn>=0x80000005)
 +    {
 +        /* Get CPU brand string */
 +        for(fn=0x80000002;fn<0x80000005;fn++)
 +        {
 +            execute_x86cpuid(fn,0,&eax,&ebx,&ecx,&edx);
 +            memcpy(p,&eax,4);
 +            memcpy(p+4,&ebx,4);
 +            memcpy(p+8,&ecx,4);
 +            memcpy(p+12,&edx,4);
 +            p+=16;
 +        }
 +        *p='\0';
 +
 +        /* Remove empty initial space */
 +        p = str;
 +        while(isspace(*(p)))
 +        {
 +            p++;
 +        }
 +        strncpy(cpuid->brand,p,GMX_CPUID_BRAND_MAXLEN);
 +    }
 +    else
 +    {
 +        strncpy(cpuid->brand,"Unknown CPU brand",GMX_CPUID_BRAND_MAXLEN);
 +    }
 +
 +    /* Find basic CPU properties */
 +    if(max_stdfn>=1)
 +    {
 +        execute_x86cpuid(0x1,0,&eax,&ebx,&ecx,&edx);
 +
 +        cpuid->family   = ((eax & 0x0FF00000) >> 20) + ((eax & 0x00000F00) >> 8);
 +        /* Note that extended model should be shifted left 4, so only shift right 12 instead of 16. */
 +        cpuid->model    = ((eax & 0x000F0000) >> 12) + ((eax & 0x000000F0) >> 4);
 +        cpuid->stepping = (eax & 0x0000000F);
 +
 +        /* Feature flags common to AMD and intel */
 +        cpuid->feature[GMX_CPUID_FEATURE_X86_SSE3]     = (ecx & (1 << 0))  != 0;
 +        cpuid->feature[GMX_CPUID_FEATURE_X86_PCLMULDQ] = (ecx & (1 << 1))  != 0;
 +        cpuid->feature[GMX_CPUID_FEATURE_X86_SSSE3]    = (ecx & (1 << 9))  != 0;
 +        cpuid->feature[GMX_CPUID_FEATURE_X86_FMA]      = (ecx & (1 << 12)) != 0;
 +        cpuid->feature[GMX_CPUID_FEATURE_X86_CX16]     = (ecx & (1 << 13)) != 0;
 +        cpuid->feature[GMX_CPUID_FEATURE_X86_SSE4_1]   = (ecx & (1 << 19)) != 0;
 +        cpuid->feature[GMX_CPUID_FEATURE_X86_SSE4_2]   = (ecx & (1 << 20)) != 0;
 +        cpuid->feature[GMX_CPUID_FEATURE_X86_POPCNT]   = (ecx & (1 << 23)) != 0;
 +        cpuid->feature[GMX_CPUID_FEATURE_X86_AES]      = (ecx & (1 << 25)) != 0;
 +        cpuid->feature[GMX_CPUID_FEATURE_X86_AVX]      = (ecx & (1 << 28)) != 0;
 +        cpuid->feature[GMX_CPUID_FEATURE_X86_F16C]     = (ecx & (1 << 29)) != 0;
 +        cpuid->feature[GMX_CPUID_FEATURE_X86_RDRND]    = (ecx & (1 << 30)) != 0;
 +
 +        cpuid->feature[GMX_CPUID_FEATURE_X86_PSE]      = (edx & (1 << 3))  != 0;
 +        cpuid->feature[GMX_CPUID_FEATURE_X86_MSR]      = (edx & (1 << 5))  != 0;
 +        cpuid->feature[GMX_CPUID_FEATURE_X86_CX8]      = (edx & (1 << 8))  != 0;
 +        cpuid->feature[GMX_CPUID_FEATURE_X86_APIC]     = (edx & (1 << 9))  != 0;
 +        cpuid->feature[GMX_CPUID_FEATURE_X86_CMOV]     = (edx & (1 << 15)) != 0;
 +        cpuid->feature[GMX_CPUID_FEATURE_X86_CLFSH]    = (edx & (1 << 19)) != 0;
 +        cpuid->feature[GMX_CPUID_FEATURE_X86_MMX]      = (edx & (1 << 23)) != 0;
 +        cpuid->feature[GMX_CPUID_FEATURE_X86_SSE2]     = (edx & (1 << 26)) != 0;
 +        cpuid->feature[GMX_CPUID_FEATURE_X86_HTT]      = (edx & (1 << 28)) != 0;
 +    }
 +    else
 +    {
 +        cpuid->family   = -1;
 +        cpuid->model    = -1;
 +        cpuid->stepping = -1;
 +    }
 +
 +    if(max_extfn>=0x80000001)
 +    {
 +        execute_x86cpuid(0x80000001,0,&eax,&ebx,&ecx,&edx);
 +        cpuid->feature[GMX_CPUID_FEATURE_X86_LAHF_LM] = (ecx & (1 << 0))  != 0;
 +        cpuid->feature[GMX_CPUID_FEATURE_X86_PDPE1GB] = (edx & (1 << 26)) != 0;
 +        cpuid->feature[GMX_CPUID_FEATURE_X86_RDTSCP]  = (edx & (1 << 27)) != 0;
 +    }
 +
 +    if(max_extfn>=0x80000007)
 +    {
 +        execute_x86cpuid(0x80000007,0,&eax,&ebx,&ecx,&edx);
 +        cpuid->feature[GMX_CPUID_FEATURE_X86_NONSTOP_TSC]  = (edx & (1 << 8))  != 0;
 +    }
 +    return 0;
 +}
 +
++/* This routine returns the number of unique different elements found in the array,
++ * and renumbers these starting from 0. For example, the array {0,1,2,8,9,10,8,9,10,0,1,2}
++ * will be rewritten to {0,1,2,3,4,5,3,4,5,0,1,2}, and it returns 6 for the
++ * number of unique elements.
++ */
++static int
++cpuid_renumber_elements(int *data, int n)
++{
++    int *unique;
++    int  i,j,nunique,found;
++
++    unique = malloc(sizeof(int)*n);
++    
++    nunique=0;
++    for(i=0;i<n;i++)
++    {
++        for(j=0,found=0;j<nunique && !found;j++)
++        {
++            found = (data[i]==unique[j]);
++        }
++        if(!found)
++        {
++            /* Insert in sorted order! */
++             for(j=nunique++;j>0 && unique[j-1]>data[i];j--)
++            {
++                unique[j]=unique[j-1];
++            }
++            unique[j]=data[i];
++        }
++    }
++    /* renumber */
++    for(i=0;i<n;i++)
++    {
++        for(j=0;j<nunique;j++)
++        {
++            if(data[i]==unique[j])
++            {
++                data[i]=j;
++            }
++        }
++    }
++    return nunique;
++}
++
++/* APIC IDs, or everything you wanted to know about your x86 cores but were afraid to ask...
++ *
++ * Raw APIC IDs are unfortunately somewhat dirty. For technical reasons they are assigned
++ * in power-of-2 chunks, and even then there are no guarantees about specific numbers - all
++ * we know is that the part for each thread/core/package is unique, and how many bits are
++ * reserved for that part. 
++ * This routine does internal renumbering so we get continuous indices, and also
++ * decodes the actual number of packages,cores-per-package and hwthreads-per-core.
++ */
++static void
++cpuid_x86_decode_apic_id(gmx_cpuid_t cpuid,int *apic_id,int core_bits,int hwthread_bits)
++{
++    int i,idx;
++    int hwthread_mask,core_mask_after_shift;
++    
++    cpuid->hwthread_id     = malloc(sizeof(int)*cpuid->nproc);
++    cpuid->core_id         = malloc(sizeof(int)*cpuid->nproc);
++    cpuid->package_id      = malloc(sizeof(int)*cpuid->nproc);
++    cpuid->locality_order  = malloc(sizeof(int)*cpuid->nproc);
++
++    hwthread_mask         = (1 << hwthread_bits) - 1;
++    core_mask_after_shift = (1 << core_bits) - 1;
++    
++    for(i=0;i<cpuid->nproc;i++)
++    {
++        cpuid->hwthread_id[i] = apic_id[i] & hwthread_mask;
++        cpuid->core_id[i]     = (apic_id[i] >> hwthread_bits) & core_mask_after_shift;
++        cpuid->package_id[i]  = apic_id[i] >> (core_bits + hwthread_bits);
++    }
++    
++    cpuid->npackages            = cpuid_renumber_elements(cpuid->package_id,cpuid->nproc);
++    cpuid->ncores_per_package   = cpuid_renumber_elements(cpuid->core_id,cpuid->nproc);
++    cpuid->nhwthreads_per_core  = cpuid_renumber_elements(cpuid->hwthread_id,cpuid->nproc);
++    
++    /* Create a locality order array, i.e. first all resources in package0, which in turn
++     * are sorted so we first have all resources in core0, where threads are sorted in order, etc.
++     */
++    for(i=0;i<cpuid->nproc;i++)
++    {
++        idx = (cpuid->package_id[i]*cpuid->ncores_per_package + cpuid->core_id[i])*cpuid->nhwthreads_per_core + cpuid->hwthread_id[i];
++        cpuid->locality_order[idx]=i;
++    }
++}
++
++
 +/* Detection of AMD-specific CPU features */
 +static int
 +cpuid_check_amd_x86(gmx_cpuid_t                cpuid)
 +{
 +    int                       max_stdfn,max_extfn;
 +    unsigned int              eax,ebx,ecx,edx;
++    int                       hwthread_bits,core_bits;
++    int *                     apic_id;
++    
 +    cpuid_check_common_x86(cpuid);
 +
 +    execute_x86cpuid(0x0,0,&eax,&ebx,&ecx,&edx);
 +    max_stdfn = eax;
 +
 +    execute_x86cpuid(0x80000000,0,&eax,&ebx,&ecx,&edx);
 +    max_extfn = eax;
 +
 +    if(max_extfn>=0x80000001)
 +    {
 +        execute_x86cpuid(0x80000001,0,&eax,&ebx,&ecx,&edx);
 +
 +        cpuid->feature[GMX_CPUID_FEATURE_X86_SSE4A]       = (ecx & (1 << 6))  != 0;
 +        cpuid->feature[GMX_CPUID_FEATURE_X86_MISALIGNSSE] = (ecx & (1 << 7))  != 0;
 +        cpuid->feature[GMX_CPUID_FEATURE_X86_XOP]         = (ecx & (1 << 11)) != 0;
 +        cpuid->feature[GMX_CPUID_FEATURE_X86_FMA4]        = (ecx & (1 << 16)) != 0;
 +    }
-     unsigned int              i;
++    
++    /* Query APIC information on AMD */
++    if(max_extfn>=0x80000008)
++    {
++#if (defined HAVE_SCHED_H && defined HAVE_SCHED_SETAFFINITY && defined HAVE_SYSCONF && defined __linux__)
++        /* Linux */
++        unsigned int   i;
++        cpu_set_t      cpuset,save_cpuset;
++        cpuid->nproc = sysconf(_SC_NPROCESSORS_ONLN);
++        apic_id      = malloc(sizeof(int)*cpuid->nproc);
++        sched_getaffinity(0,sizeof(cpu_set_t),&save_cpuset);
++        /* Get APIC id from each core */
++        CPU_ZERO(&cpuset);
++        for(i=0;i<cpuid->nproc;i++)
++        {
++            CPU_SET(i,&cpuset);
++            sched_setaffinity(0,sizeof(cpu_set_t),&cpuset);
++            execute_x86cpuid(0x1,0,&eax,&ebx,&ecx,&edx);
++            apic_id[i]=ebx >> 24;
++            CPU_CLR(i,&cpuset);
++        }
++        /* Reset affinity to the value it had when calling this routine */
++        sched_setaffinity(0,sizeof(cpu_set_t),&save_cpuset);
++#define CPUID_HAVE_APIC
++#elif defined GMX_NATIVE_WINDOWS
++        /* Windows */
++        DWORD_PTR     i;
++        SYSTEM_INFO   sysinfo;
++        unsigned int  save_affinity,affinity;
++        GetSystemInfo( &sysinfo );
++        cpuid->nproc  = sysinfo.dwNumberOfProcessors;
++        apic_id       = malloc(sizeof(int)*cpuid->nproc);
++        /* Get previous affinity mask */
++        save_affinity = SetThreadAffinityMask(GetCurrentThread(),1);
++        for(i=0;i<cpuid->nproc;i++)
++        {
++            SetThreadAffinityMask(GetCurrentThread(),(((DWORD_PTR)1)<<i));
++            Sleep(0);
++            execute_x86cpuid(0x1,0,&eax,&ebx,&ecx,&edx);
++            apic_id[i]=ebx >> 24;
++        }
++        SetThreadAffinityMask(GetCurrentThread(),save_affinity);
++#define CPUID_HAVE_APIC
++#endif
++#ifdef CPUID_HAVE_APIC
++        /* AMD does not support SMT yet - there are no hwthread bits in apic ID */
++        hwthread_bits = 0;
++        /* Get number of core bits in apic ID - try modern extended method first */
++        execute_x86cpuid(0x80000008,0,&eax,&ebx,&ecx,&edx);
++        core_bits = (ecx >> 12) & 0xf;
++        if(core_bits==0)
++        {
++            /* Legacy method for old single/dual core AMD CPUs */
++            int i = ecx & 0xF;
++            for(core_bits=0;(i>>core_bits)>0;core_bits++) ;
++        }
++        cpuid_x86_decode_apic_id(cpuid,apic_id,core_bits,hwthread_bits);
++        cpuid->have_cpu_topology = 1;
++#endif
++    }
 +    return 0;
 +}
 +
 +/* Detection of Intel-specific CPU features */
 +static int
 +cpuid_check_intel_x86(gmx_cpuid_t                cpuid)
 +{
 +    unsigned int              max_stdfn,max_extfn;
 +    unsigned int              eax,ebx,ecx,edx;
 +    unsigned int              max_logical_cores,max_physical_cores;
++    int                       hwthread_bits,core_bits;
++    int *                     apic_id;
 +
 +    cpuid_check_common_x86(cpuid);
 +
 +    execute_x86cpuid(0x0,0,&eax,&ebx,&ecx,&edx);
 +    max_stdfn = eax;
 +
 +    execute_x86cpuid(0x80000000,0,&eax,&ebx,&ecx,&edx);
 +    max_extfn = eax;
 +
 +    if(max_stdfn>=1)
 +    {
 +        execute_x86cpuid(0x1,0,&eax,&ebx,&ecx,&edx);
 +        cpuid->feature[GMX_CPUID_FEATURE_X86_PDCM]    = (ecx & (1 << 15)) != 0;
 +        cpuid->feature[GMX_CPUID_FEATURE_X86_PCID]    = (ecx & (1 << 17)) != 0;
 +        cpuid->feature[GMX_CPUID_FEATURE_X86_X2APIC]  = (ecx & (1 << 21)) != 0;
 +        cpuid->feature[GMX_CPUID_FEATURE_X86_TDT]     = (ecx & (1 << 24)) != 0;
 +    }
 +
 +    if(max_stdfn>=7)
 +    {
 +        execute_x86cpuid(0x7,0,&eax,&ebx,&ecx,&edx);
 +        cpuid->feature[GMX_CPUID_FEATURE_X86_AVX2]    = (ebx & (1 << 5))  != 0;
 +    }
 +
 +    /* Check whether Hyper-Threading is enabled, not only supported */
 +    if(cpuid->feature[GMX_CPUID_FEATURE_X86_HTT] && max_stdfn>=4)
 +    {
 +        execute_x86cpuid(0x1,0,&eax,&ebx,&ecx,&edx);
 +        max_logical_cores  = (ebx >> 16) & 0x0FF;
 +        execute_x86cpuid(0x4,0,&eax,&ebx,&ecx,&edx);
 +        max_physical_cores = ((eax >> 26) & 0x3F) + 1;
 +
 +        /* Clear HTT flag if we only have 1 logical core per physical */
 +        if(max_logical_cores/max_physical_cores < 2)
 +        {
 +            cpuid->feature[GMX_CPUID_FEATURE_X86_HTT] = 0;
 +        }
 +    }
++    
++    if(max_stdfn>=0xB)
++    {
++        /* Query x2 APIC information from cores */
++#if (defined HAVE_SCHED_H && defined HAVE_SCHED_SETAFFINITY && defined HAVE_SYSCONF && defined __linux__)
++        /* Linux */
++        unsigned int   i;
++        cpu_set_t      cpuset,save_cpuset;
++        cpuid->nproc = sysconf(_SC_NPROCESSORS_ONLN);
++        apic_id      = malloc(sizeof(int)*cpuid->nproc);
++        sched_getaffinity(0,sizeof(cpu_set_t),&save_cpuset);
++        /* Get x2APIC ID from each hardware thread */
++        CPU_ZERO(&cpuset);
++        for(i=0;i<cpuid->nproc;i++)
++        {
++            CPU_SET(i,&cpuset);
++            sched_setaffinity(0,sizeof(cpu_set_t),&cpuset);
++            execute_x86cpuid(0xB,0,&eax,&ebx,&ecx,&edx);
++            apic_id[i]=edx;
++            CPU_CLR(i,&cpuset);
++        }
++        /* Reset affinity to the value it had when calling this routine */
++        sched_setaffinity(0,sizeof(cpu_set_t),&save_cpuset);
++#define CPUID_HAVE_APIC
++#elif defined GMX_NATIVE_WINDOWS
++        /* Windows */
++        DWORD_PTR     i;
++        SYSTEM_INFO   sysinfo;
++        unsigned int  save_affinity,affinity;
++        GetSystemInfo( &sysinfo );
++        cpuid->nproc  = sysinfo.dwNumberOfProcessors;
++        apic_id       = malloc(sizeof(int)*cpuid->nproc);
++        /* Get previous affinity mask */
++        save_affinity = SetThreadAffinityMask(GetCurrentThread(),1);
++        for(i=0;i<cpuid->nproc;i++)
++        {
++            SetThreadAffinityMask(GetCurrentThread(),(((DWORD_PTR)1)<<i));
++            Sleep(0);
++            execute_x86cpuid(0xB,0,&eax,&ebx,&ecx,&edx);
++            apic_id[i]=edx;
++        }
++        SetThreadAffinityMask(GetCurrentThread(),save_affinity);
++#define CPUID_HAVE_APIC
++#endif
++#ifdef CPUID_HAVE_APIC
++        execute_x86cpuid(0xB,0,&eax,&ebx,&ecx,&edx);
++        hwthread_bits    = eax & 0x1F;
++        execute_x86cpuid(0xB,1,&eax,&ebx,&ecx,&edx);
++        core_bits        = (eax & 0x1F) - hwthread_bits;
++        cpuid_x86_decode_apic_id(cpuid,apic_id,core_bits,hwthread_bits);
++        cpuid->have_cpu_topology = 1;
++#endif
++    }
 +    return 0;
 +}
++#endif /* GMX_CPUID_X86 */
++
++
 +
 +/* Try to find the vendor of the current CPU, so we know what specific
 + * detection routine to call.
 + */
 +static enum gmx_cpuid_vendor
 +cpuid_check_vendor(void)
 +{
 +    enum gmx_cpuid_vendor      i,vendor;
 +    /* Register data used on x86 */
 +    unsigned int               eax,ebx,ecx,edx;
 +    char                       vendorstring[13];
 +
 +    /* Set default first */
 +    vendor = GMX_CPUID_VENDOR_UNKNOWN;
 +
++#ifdef GMX_CPUID_X86
 +    execute_x86cpuid(0x0,0,&eax,&ebx,&ecx,&edx);
 +
 +    memcpy(vendorstring,&ebx,4);
 +    memcpy(vendorstring+4,&edx,4);
 +    memcpy(vendorstring+8,&ecx,4);
 +
 +    vendorstring[12]='\0';
 +
 +    for(i=GMX_CPUID_VENDOR_UNKNOWN;i<GMX_CPUID_NVENDORS;i++)
 +    {
 +        if(!strncmp(vendorstring,gmx_cpuid_vendor_string[i],12))
 +        {
 +            vendor = i;
 +        }
 +    }
++#else
++    vendor = GMX_CPUID_VENDOR_UNKNOWN;
++#endif
++    
 +    return vendor;
 +}
 +
 +
 +
++int
++gmx_cpuid_topology(gmx_cpuid_t        cpuid,
++                   int *              nprocessors,
++                   int *              npackages,
++                   int *              ncores_per_package,
++                   int *              nhwthreads_per_core,
++                   const int **       package_id,
++                   const int **       core_id,
++                   const int **       hwthread_id,
++                   const int **       locality_order)
++{
++    int rc;
++    
++    if(cpuid->have_cpu_topology)
++    {
++        *nprocessors          = cpuid->nproc;
++        *npackages            = cpuid->npackages;
++        *ncores_per_package   = cpuid->ncores_per_package;
++        *nhwthreads_per_core  = cpuid->nhwthreads_per_core;
++        *package_id           = cpuid->package_id;
++        *core_id              = cpuid->core_id;
++        *hwthread_id          = cpuid->hwthread_id;
++        *locality_order       = cpuid->locality_order;
++        rc = 0;
++    }
++    else
++    {
++        rc = -1;
++    }
++    return rc;
++}
++
++
++enum gmx_cpuid_x86_smt
++gmx_cpuid_x86_smt(gmx_cpuid_t cpuid)
++{
++    enum gmx_cpuid_x86_smt rc;
++    
++    if(cpuid->have_cpu_topology)
++    {
++        rc = (cpuid->nhwthreads_per_core>1) ? GMX_CPUID_X86_SMT_ENABLED : GMX_CPUID_X86_SMT_DISABLED;
++    }
++    else if(cpuid->vendor==GMX_CPUID_VENDOR_AMD || gmx_cpuid_feature(cpuid,GMX_CPUID_FEATURE_X86_HTT)==0)
++    {
++        rc = GMX_CPUID_X86_SMT_DISABLED;
++    }
++    else
++    {
++        rc = GMX_CPUID_X86_SMT_CANNOTDETECT;
++    }
++    return rc;
++}
++
 +
 +int
 +gmx_cpuid_init               (gmx_cpuid_t *              pcpuid)
 +{
 +    gmx_cpuid_t cpuid;
 +    int i;
 +
 +    cpuid = malloc(sizeof(*cpuid));
 +
 +    *pcpuid = cpuid;
 +
 +    for(i=0;i<GMX_CPUID_NFEATURES;i++)
 +    {
 +        cpuid->feature[i]=0;
 +    }
++    cpuid->have_cpu_topology   = 0;
++    cpuid->nproc               = 0;
++    cpuid->npackages           = 0;
++    cpuid->ncores_per_package  = 0;
++    cpuid->nhwthreads_per_core = 0;
++    cpuid->package_id          = NULL;
++    cpuid->core_id             = NULL;
++    cpuid->hwthread_id         = NULL;
++    cpuid->locality_order      = NULL;
++    
 +    cpuid->vendor = cpuid_check_vendor();
++    
 +    switch(cpuid->vendor)
 +    {
++#ifdef GMX_CPUID_X86
 +        case GMX_CPUID_VENDOR_INTEL:
 +            cpuid_check_intel_x86(cpuid);
 +            break;
 +        case GMX_CPUID_VENDOR_AMD:
 +            cpuid_check_amd_x86(cpuid);
 +            break;
++#endif
 +        default:
 +            /* Could not find vendor */
 +            strncpy(cpuid->brand,"Unknown CPU brand",GMX_CPUID_BRAND_MAXLEN);
 +            cpuid->family         = 0;
 +            cpuid->model          = 0;
 +            cpuid->stepping       = 0;
- enum gmx_cpuid_x86_smt
- gmx_cpuid_x86_smt(gmx_cpuid_t cpuid)
- {
- #if (defined HAVE_SCHED_H && defined HAVE_SCHED_SETAFFINITY && defined HAVE_SYSCONF && defined __linux__)
-     int            i;
-     int            nproc;
-     cpu_set_t      cpuset,save_cpuset;
-     int *          apic_id;
-     unsigned int   eax,ebx,ecx,edx;
-     int            core_shift_bits;
-     int            smt_found;
-     if( gmx_cpuid_vendor(cpuid)!=GMX_CPUID_VENDOR_INTEL ||
-        gmx_cpuid_feature(cpuid,GMX_CPUID_FEATURE_X86_HTT)==0)
-     {
-         return GMX_CPUID_X86_SMT_DISABLED;
-     }
-     /* Check cpuid max standard function */
-     execute_x86cpuid(0x0,0,&eax,&ebx,&ecx,&edx);
-     /* Early CPUs that do not support function 11 do not support SMT either */
-     if(eax<0xB)
-     {
-         return GMX_CPUID_X86_SMT_DISABLED;
-     }
-     /* If we got here, it is a modern Intel CPU that supports detection, as does our OS */
-     /* How many processors? */
-     nproc = sysconf(_SC_NPROCESSORS_ONLN);
-     apic_id      = malloc(sizeof(int)*nproc);
-     sched_getaffinity(0,sizeof(cpu_set_t),&save_cpuset);
-     /* Get x2APIC ID from each hardware thread */
-     CPU_ZERO(&cpuset);
-     for(i=0;i<nproc;i++)
-     {
-         CPU_SET(i,&cpuset);
-         sched_setaffinity(0,sizeof(cpu_set_t),&cpuset);
-         execute_x86cpuid(0xB,0,&eax,&ebx,&ecx,&edx);
-         apic_id[i]=edx;
-         CPU_CLR(i,&cpuset);
-     }
-     /* Reset affinity to the value it had when calling this routine */
-     sched_setaffinity(0,sizeof(cpu_set_t),&save_cpuset);
-     core_shift_bits = eax & 0x1F;
-     /* Check if there is any other APIC id that is identical to [0], apart from
-      * the hardware thread bit.
-      */
-     smt_found  = 0;
-     for(i=1;i<nproc && smt_found==0;i++)
-     {
-         smt_found = (apic_id[i]>>core_shift_bits == apic_id[0] >> core_shift_bits);
-     }
-     free(apic_id);
-     if(smt_found==1)
-     {
-         return GMX_CPUID_X86_SMT_ENABLED;
-     }
-     else
-     {
-         return GMX_CPUID_X86_SMT_DISABLED;
-     }
- #else
-     /* Do the trivial stuff first. If Hyper-Threading isn't even supported it
-      * cannot be enabled, no matter what OS detection we use!
-      */
-     if(0==gmx_cpuid_feature(cpuid,GMX_CPUID_FEATURE_X86_HTT))
-     {
-         return GMX_CPUID_X86_SMT_DISABLED;
-     }
-     else
-     {
-         return GMX_CPUID_X86_SMT_CANNOTDETECT;
-     }
- #endif
- }
++            
 +            for(i=0;i<GMX_CPUID_NFEATURES;i++)
 +            {
 +                cpuid->feature[i]=0;
 +            }
 +            cpuid->feature[GMX_CPUID_FEATURE_CANNOTDETECT] = 1;
 +            break;
 +    }
 +
 +    return 0;
 +}
 +
 +
 +
 +void
 +gmx_cpuid_done               (gmx_cpuid_t              cpuid)
 +{
 +    free(cpuid);
 +}
 +
 +
 +int
 +gmx_cpuid_formatstring       (gmx_cpuid_t              cpuid,
 +                              char *                   str,
 +                              int                      n)
 +{
 +    int c;
 +    int i;
 +    enum gmx_cpuid_feature  feature;
 +
 +#ifdef _MSC_VER
 +    _snprintf(str,n,
 +              "Vendor: %s\n"
 +              "Brand:  %s\n"
 +              "Family: %2d  Model: %2d  Stepping: %2d\n"
 +              "Features:",
 +              gmx_cpuid_vendor_string[gmx_cpuid_vendor(cpuid)],
 +              gmx_cpuid_brand(cpuid),
 +              gmx_cpuid_family(cpuid),gmx_cpuid_model(cpuid),gmx_cpuid_stepping(cpuid));
 +#else
 +    snprintf(str,n,
 +             "Vendor: %s\n"
 +             "Brand:  %s\n"
 +             "Family: %2d  Model: %2d  Stepping: %2d\n"
 +             "Features:",
 +             gmx_cpuid_vendor_string[gmx_cpuid_vendor(cpuid)],
 +             gmx_cpuid_brand(cpuid),
 +             gmx_cpuid_family(cpuid),gmx_cpuid_model(cpuid),gmx_cpuid_stepping(cpuid));
 +#endif
 +
 +    str[n-1] = '\0';
 +    c = strlen(str);
 +    n   -= c;
 +    str += c;
 +
 +    for(feature=GMX_CPUID_FEATURE_CANNOTDETECT;feature<GMX_CPUID_NFEATURES;feature++)
 +    {
 +        if(gmx_cpuid_feature(cpuid,feature)==1)
 +        {
 +#ifdef _MSC_VER
 +            _snprintf(str,n," %s",gmx_cpuid_feature_string[feature]);
 +#else
 +            snprintf(str,n," %s",gmx_cpuid_feature_string[feature]);
 +#endif
 +            str[n-1] = '\0';
 +            c = strlen(str);
 +            n   -= c;
 +            str += c;
 +        }
 +    }
 +#ifdef _MSC_VER
 +    _snprintf(str,n,"\n");
 +#else
 +    snprintf(str,n,"\n");
 +#endif
 +    str[n-1] = '\0';
 +
 +    return 0;
 +}
 +
 +
 +
 +enum gmx_cpuid_acceleration
 +gmx_cpuid_acceleration_suggest  (gmx_cpuid_t                 cpuid)
 +{
 +    enum gmx_cpuid_acceleration  tmpacc;
 +
 +    tmpacc = GMX_CPUID_ACCELERATION_NONE;
 +
 +    if(gmx_cpuid_vendor(cpuid)==GMX_CPUID_VENDOR_INTEL)
 +    {
 +        if(gmx_cpuid_feature(cpuid,GMX_CPUID_FEATURE_X86_AVX))
 +        {
 +            tmpacc = GMX_CPUID_ACCELERATION_X86_AVX_256;
 +        }
 +        else if(gmx_cpuid_feature(cpuid,GMX_CPUID_FEATURE_X86_SSE4_1))
 +        {
 +            tmpacc = GMX_CPUID_ACCELERATION_X86_SSE4_1;
 +        }
 +        else if(gmx_cpuid_feature(cpuid,GMX_CPUID_FEATURE_X86_SSE2))
 +        {
 +            tmpacc = GMX_CPUID_ACCELERATION_X86_SSE2;
 +        }
 +    }
 +    else if(gmx_cpuid_vendor(cpuid)==GMX_CPUID_VENDOR_AMD)
 +    {
 +        if(gmx_cpuid_feature(cpuid,GMX_CPUID_FEATURE_X86_AVX))
 +        {
 +            tmpacc = GMX_CPUID_ACCELERATION_X86_AVX_128_FMA;
 +        }
 +        else if(gmx_cpuid_feature(cpuid,GMX_CPUID_FEATURE_X86_SSE4_1))
 +        {
 +            tmpacc = GMX_CPUID_ACCELERATION_X86_SSE4_1;
 +        }
 +        else if(gmx_cpuid_feature(cpuid,GMX_CPUID_FEATURE_X86_SSE2))
 +        {
 +            tmpacc = GMX_CPUID_ACCELERATION_X86_SSE2;
 +        }
 +    }
 +
 +    return tmpacc;
 +}
 +
 +
 +
 +int
 +gmx_cpuid_acceleration_check(gmx_cpuid_t   cpuid,
 +                             FILE *        log)
 +{
 +    int                           rc;
 +    char                          str[1024];
 +    enum gmx_cpuid_acceleration   acc;
 +
 +    acc = gmx_cpuid_acceleration_suggest(cpuid);
 +
 +    rc = (acc != compiled_acc);
 +
 +    gmx_cpuid_formatstring(cpuid,str,1023);
 +    str[1023] = '\0';
 +
 +    if(log!=NULL)
 +    {
 +        fprintf(log,
 +                "\nDetecting CPU-specific acceleration.\nPresent hardware specification:\n"
 +                "%s"
 +                "Acceleration most likely to fit this hardware: %s\n"
 +                "Acceleration selected at GROMACS compile time: %s\n\n",
 +                str,
 +                gmx_cpuid_acceleration_string[acc],
 +                gmx_cpuid_acceleration_string[compiled_acc]);
 +    }
 +
 +    if(rc!=0)
 +    {
 +        if(log!=NULL)
 +        {
 +        fprintf(log,"\nBinary not matching hardware - you might be losing performance.\n"
 +                "Acceleration most likely to fit this hardware: %s\n"
 +                "Acceleration selected at GROMACS compile time: %s\n\n",
 +                gmx_cpuid_acceleration_string[acc],
 +                gmx_cpuid_acceleration_string[compiled_acc]);
 +        }
 +        printf("Compiled acceleration: %s (Gromacs could use %s on this machine, which is better)\n",
 +               gmx_cpuid_acceleration_string[compiled_acc],
 +               gmx_cpuid_acceleration_string[acc]);
 +    }
 +    return rc;
 +}
 +
 +
 +
 +#ifdef GMX_CPUID_STANDALONE
 +/* Stand-alone program to enable queries of CPU features from Cmake.
 + * Note that you need to check inline ASM capabilities before compiling and set
 + * -DGMX_X86_GCC_INLINE_ASM for the cpuid instruction to work...
 + */
 +int
 +main(int argc, char **argv)
 +{
 +    gmx_cpuid_t                   cpuid;
 +    enum gmx_cpuid_acceleration   acc;
 +    int                           i,cnt;
 +
 +    if(argc<2)
 +    {
 +        fprintf(stdout,
 +                "Usage:\n\n%s [flags]\n\n"
 +                "Available flags:\n"
 +                "-vendor        Print CPU vendor.\n"
 +                "-brand         Print CPU brand string.\n"
 +                "-family        Print CPU family version.\n"
 +                "-model         Print CPU model version.\n"
 +                "-stepping      Print CPU stepping version.\n"
 +                "-features      Print CPU feature flags.\n"
 +                "-acceleration  Print suggested GROMACS acceleration.\n"
 +                ,argv[0]);
 +        exit(0);
 +    }
 +
 +    gmx_cpuid_init(&cpuid);
 +
 +    if(!strncmp(argv[1],"-vendor",3))
 +    {
 +        printf("%s\n",gmx_cpuid_vendor_string[cpuid->vendor]);
 +    }
 +    else if(!strncmp(argv[1],"-brand",3))
 +    {
 +        printf("%s\n",cpuid->brand);
 +    }
 +    else if(!strncmp(argv[1],"-family",3))
 +    {
 +        printf("%d\n",cpuid->family);
 +    }
 +    else if(!strncmp(argv[1],"-model",3))
 +    {
 +        printf("%d\n",cpuid->model);
 +    }
 +    else if(!strncmp(argv[1],"-stepping",3))
 +    {
 +        printf("%d\n",cpuid->stepping);
 +    }
 +    else if(!strncmp(argv[1],"-features",3))
 +    {
 +        cnt = 0;
 +        for(i=0;i<GMX_CPUID_NFEATURES;i++)
 +        {
 +            if(cpuid->feature[i]==1)
 +            {
 +                if(cnt++ > 0)
 +                {
 +                    printf(" ");
 +                }
 +                printf("%s",gmx_cpuid_feature_string[i]);
 +            }
 +        }
 +        printf("\n");
 +    }
 +    else if(!strncmp(argv[1],"-acceleration",3))
 +    {
 +        acc = gmx_cpuid_acceleration_suggest(cpuid);
 +        fprintf(stdout,"%s\n",gmx_cpuid_acceleration_string[acc]);
 +    }
 +
 +    gmx_cpuid_done(cpuid);
 +
 +
 +    return 0;
 +}
 +
 +#endif
index 9985f3dfb121658c23e79792007dc239c3ac8677,0000000000000000000000000000000000000000..e7c2d65e1e198742921394b90671956468d54023
mode 100644,000000..100644
--- /dev/null
@@@ -1,619 -1,0 +1,619 @@@
-         sprintf(sbuf, "%d GPU%s %sselected to be used for this run: ",
 +/* -*- mode: c; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4; c-file-style: "stroustrup"; -*-
 + *
 + * 
 + * This file is part of GROMACS.
 + * Copyright (c) 2012-  
 + *
 + * Written by the Gromacs development team under coordination of
 + * David van der Spoel, Berk Hess, and Erik Lindahl.
 + *
 + * This library is free software; you can redistribute it and/or
 + * modify it under the terms of the GNU Lesser General Public License
 + * as published by the Free Software Foundation; either version 2
 + * of the License, or (at your option) any later version.
 + *
 + * To help us fund GROMACS development, we humbly ask that you cite
 + * the research papers on the package. Check out http://www.gromacs.org
 + * 
 + * And Hey:
 + * GROup of MAchos and Cynical Suckers
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <stdlib.h>
 +#include <assert.h>
 +#include <string.h>
 +
 +#include "types/enums.h"
 +#include "types/hw_info.h"
 +#include "types/commrec.h"
 +#include "gmx_fatal.h"
 +#include "gmx_fatal_collective.h"
 +#include "smalloc.h"
 +#include "gpu_utils.h"
 +#include "statutil.h"
 +#include "gmx_detect_hardware.h"
 +#include "main.h"
 +#include "md_logging.h"
 +
 +#if ((defined(WIN32) || defined( _WIN32 ) || defined(WIN64) || defined( _WIN64 )) && !(defined (__CYGWIN__) || defined (__CYGWIN32__)))
 +#include "windows.h"
 +#endif
 +
 +/* Although we can't have more than 10 GPU different ID-s passed by the user as
 + * the id-s are assumed to be represented by single digits, as multiple
 + * processes can share a GPU, we can end up with more than 10 IDs.
 + * To account for potential extreme cases we'll set the limit to a pretty
 + * ridiculous number. */
 +static unsigned int max_gpu_ids_user = 64;
 +
 +static const char* invalid_gpuid_hint =
 +    "A delimiter-free sequence of valid numeric IDs of available GPUs is expected.";
 +
 +/* FW decl. */
 +void limit_num_gpus_used(gmx_hw_info_t *hwinfo, int count);
 +
 +static void sprint_gpus(char *sbuf, const gmx_gpu_info_t *gpu_info, gmx_bool bPrintAll)
 +{
 +    int      i, ndev;
 +    char     stmp[STRLEN];
 +
 +    ndev = gpu_info->ncuda_dev;
 +
 +    sbuf[0] = '\0';
 +    for (i = 0; i < ndev; i++)
 +    {
 +        get_gpu_device_info_string(stmp, gpu_info, i);
 +        strcat(sbuf, "  ");
 +        strcat(sbuf, stmp);
 +        if (i < ndev - 1)
 +        {
 +            strcat(sbuf, "\n");
 +        }
 +    }
 +}
 +
 +static void print_gpu_detection_stats(FILE *fplog,
 +                                      const gmx_gpu_info_t *gpu_info,
 +                                      const t_commrec *cr)
 +{
 +    char onhost[266],stmp[STRLEN];
 +    int  ngpu;
 +
 +    ngpu = gpu_info->ncuda_dev;
 +
 +#if defined GMX_MPI && !defined GMX_THREAD_MPI
 +    /* We only print the detection on one, of possibly multiple, nodes */
 +    strncpy(onhost," on host ",10);
 +    gmx_gethostname(onhost+9,256);
 +#else
 +    /* We detect all relevant GPUs */
 +    strncpy(onhost,"",1);
 +#endif
 +
 +    if (ngpu > 0)
 +    {
 +        sprint_gpus(stmp, gpu_info, TRUE);
 +        md_print_warn(cr, fplog, "%d GPU%s detected%s:\n%s\n",
 +                      ngpu, (ngpu > 1) ? "s" : "", onhost, stmp);
 +    }
 +    else
 +    {
 +        md_print_warn(cr, fplog, "No GPUs detected%s\n", onhost);
 +    }
 +}
 +
 +static void print_gpu_use_stats(FILE *fplog,
 +                                const gmx_gpu_info_t *gpu_info,
 +                                const t_commrec *cr)
 +{
 +    char sbuf[STRLEN], stmp[STRLEN];
 +    int  i, ngpu, ngpu_all;
 +
 +    ngpu     = gpu_info->ncuda_dev_use;
 +    ngpu_all = gpu_info->ncuda_dev;
 +
 +    /* Issue note if GPUs are available but not used */
 +    if (ngpu_all > 0 && ngpu < 1)
 +    {
 +        sprintf(sbuf,
 +                "%d compatible GPU%s detected in the system, but none will be used.\n"
 +                "Consider trying GPU acceleration with the Verlet scheme!",
 +                ngpu_all, (ngpu_all > 1) ? "s" : "");
 +    }
 +    else
 +    {
-                       invalid_gpuid_hint, idstr[i]);
++        sprintf(sbuf, "%d GPU%s %sselected for this run: ",
 +                ngpu, (ngpu > 1) ? "s" : "",
 +                gpu_info->bUserSet ? "user-" : "auto-");
 +        for (i = 0; i < ngpu; i++)
 +        {
 +            sprintf(stmp, "#%d", get_gpu_device_id(gpu_info, i));
 +            if (i < ngpu - 1)
 +            {
 +                strcat(stmp, ", ");
 +            }
 +            strcat(sbuf, stmp);
 +        }
 +    }
 +    md_print_info(cr, fplog, "%s\n\n", sbuf);
 +}
 +
 +/* Parse a "plain" GPU ID string which contains a sequence of digits corresponding
 + * to GPU IDs; the order will indicate the process/tMPI thread - GPU assignment. */
 +static void parse_gpu_id_plain_string(const char *idstr, int *nid, int *idlist)
 +{
 +    int  i;
 +    size_t len_idstr;
 +
 +    len_idstr = strlen(idstr);
 +
 +    if (len_idstr > max_gpu_ids_user)
 +    {
 +        gmx_fatal(FARGS,"%d GPU IDs provided, but only at most %d are supported",
 +                  len_idstr, max_gpu_ids_user);
 +    }
 +
 +    *nid = len_idstr;
 +
 +    for (i = 0; i < *nid; i++)
 +    {
 +        if (idstr[i] < '0' || idstr[i] > '9')
 +        {
 +            gmx_fatal(FARGS, "Invalid character in GPU ID string: '%c'\n%s\n",
++                      idstr[i], invalid_gpuid_hint);
 +        }
 +        idlist[i] = idstr[i] - '0';
 +    }
 +}
 +
 +static void parse_gpu_id_csv_string(const char *idstr, int *nid, int *idlist)
 +{
 +    /* XXX implement cvs format to support more than 10 different GPUs in a box. */
 +    gmx_incons("Not implemented yet");
 +}
 +
 +void gmx_check_hw_runconf_consistency(FILE *fplog, gmx_hw_info_t *hwinfo,
 +                                      const t_commrec *cr, int ntmpi_requested,
 +                                      gmx_bool bUseGPU)
 +{
 +    int      npppn, ntmpi_pp, ngpu;
 +    char     sbuf[STRLEN], th_or_proc[STRLEN], th_or_proc_plural[STRLEN], pernode[STRLEN];
 +    char     gpu_plural[2];
 +    gmx_bool bGPUBin, btMPI, bMPI, bMaxMpiThreadsSet, bNthreadsAuto, bEmulateGPU;
 +
 +    assert(hwinfo);
 +    assert(cr);
 +
 +    btMPI = bMPI = FALSE;
 +    bNthreadsAuto = FALSE;
 +#if defined(GMX_THREAD_MPI)
 +    btMPI = TRUE;
 +    bNthreadsAuto = (ntmpi_requested < 1);
 +#elif defined(GMX_LIB_MPI)
 +    bMPI  = TRUE;
 +#endif
 +
 +#ifdef GMX_GPU
 +    bGPUBin      = TRUE;
 +#else
 +    bGPUBin      = FALSE;
 +#endif
 +
 +    /* GPU emulation detection is done later, but we need here as well
 +     * -- uncool, but there's no elegant workaround */
 +    bEmulateGPU       = (getenv("GMX_EMULATE_GPU") != NULL);
 +    bMaxMpiThreadsSet = (getenv("GMX_MAX_MPI_THREADS") != NULL);
 +
 +    if (SIMMASTER(cr))
 +    {
 +        /* check the acceleration mdrun is compiled with against hardware capabilities */
 +        /* TODO: Here we assume homogeneous hardware which is not necessarily the case!
 +         *       Might not hurt to add an extra check over MPI. */
 +        gmx_cpuid_acceleration_check(hwinfo->cpuid_info, fplog);
 +    }
 +
 +    /* Below we only do consistency checks for PP and GPUs,
 +     * this is irrelevant for PME only nodes, so in that case we return here.
 +     */
 +    if (!(cr->duty & DUTY_PP))
 +    {
 +        return;
 +    }
 +
 +    /* Need to ensure that we have enough GPUs:
 +     * - need one GPU per PP node
 +     * - no GPU oversubscription with tMPI
 +     * => keep on the GPU support, otherwise turn off (or bail if forced)
 +     * */
 +    /* number of PP processes per node */
 +    npppn = cr->nrank_pp_intranode;
 +
 +    pernode[0] = '\0';
 +    th_or_proc_plural[0] = '\0';
 +    if (btMPI)
 +    {
 +        sprintf(th_or_proc, "thread-MPI thread");
 +        if (npppn > 1)
 +        {
 +            sprintf(th_or_proc_plural, "s");
 +        }
 +    }
 +    else if (bMPI)
 +    {
 +        sprintf(th_or_proc, "MPI process");
 +        if (npppn > 1)
 +        {
 +            sprintf(th_or_proc_plural, "es");
 +        }
 +        sprintf(pernode, " per node");
 +    }
 +    else
 +    {
 +        /* neither MPI nor tMPI */
 +        sprintf(th_or_proc, "process");
 +    }
 +
 +    if (bGPUBin)
 +    {
 +        print_gpu_detection_stats(fplog, &hwinfo->gpu_info, cr);
 +    }
 +
 +    if (bUseGPU && hwinfo->bCanUseGPU && !bEmulateGPU)
 +    {
 +        ngpu = hwinfo->gpu_info.ncuda_dev_use;
 +        sprintf(gpu_plural, "%s", (ngpu > 1) ? "s" : "");
 +
 +        /* number of tMPI threads atuo-adjusted */
 +        if (btMPI && bNthreadsAuto && SIMMASTER(cr))
 +        {
 +            if (npppn < ngpu)
 +            {
 +                if (hwinfo->gpu_info.bUserSet)
 +                {
 +                    /* The user manually provided more GPUs than threads we could
 +                     * automatically start. */
 +                    gmx_fatal(FARGS,
 +                              "%d GPU%s provided, but only %d PP thread-MPI thread%s coud be started.\n"
 +                              "%s requires one PP tread-MPI thread per GPU; use fewer GPUs%s.",
 +                              ngpu, gpu_plural, npppn, th_or_proc_plural,
 +                              ShortProgram(), bMaxMpiThreadsSet ? "\nor allow more threads to be used" : "");
 +                }
 +                else
 +                {
 +                    /* There are more GPUs than tMPI threads; we have to limit the number GPUs used. */
 +                    md_print_warn(cr,fplog,
 +                                  "NOTE: %d GPU%s were detected, but only %d PP thread-MPI thread%s can be started.\n"
 +                                  "      %s can use one GPU per PP tread-MPI thread, so only %d GPU%s will be used.%s\n",
 +                                  ngpu, gpu_plural, npppn, th_or_proc_plural,
 +                                  ShortProgram(), npppn, npppn > 1 ? "s" : "",
 +                                  bMaxMpiThreadsSet ? "\n      Also, you can allow more threads to be used by increasing GMX_MAX_MPI_THREADS" : "");
 +
 +                    if (cr->rank_pp_intranode == 0)
 +                    {
 +                        limit_num_gpus_used(hwinfo, npppn);
 +                        ngpu = hwinfo->gpu_info.ncuda_dev_use;
 +                        sprintf(gpu_plural, "%s", (ngpu > 1) ? "s" : "");
 +                    }
 +                }
 +            }
 +        }
 +
 +        if (ngpu != npppn)
 +        {
 +            if (hwinfo->gpu_info.bUserSet)
 +            {
 +                gmx_fatal(FARGS,
 +                          "Incorrect launch configuration: mismatching number of PP %s%s and GPUs%s.\n"
 +                          "%s was started with %d PP %s%s%s, but you provided %d GPU%s.",
 +                          th_or_proc, btMPI ? "s" : "es" , pernode,
 +                          ShortProgram(), npppn, th_or_proc, th_or_proc_plural, pernode, ngpu, gpu_plural);
 +            }
 +            else
 +            {
 +                if (ngpu > npppn)
 +                {
 +                    md_print_warn(cr,fplog,
 +                                  "NOTE: potentially sub-optimal launch configuration, %s started with less\n"
 +                                  "      PP %s%s%s than GPU%s available.\n"
 +                                  "      Each PP %s can only use one GPU, so only %d GPU%s%s will be used.",
 +                                  ShortProgram(),
 +                                  th_or_proc, th_or_proc_plural, pernode, gpu_plural,
 +                                  th_or_proc, npppn, gpu_plural, pernode);
 +
 +                    if (bMPI || (btMPI && cr->rank_pp_intranode == 0))
 +                    {
 +                        limit_num_gpus_used(hwinfo, npppn);
 +                        ngpu = hwinfo->gpu_info.ncuda_dev_use;
 +                        sprintf(gpu_plural, "%s", (ngpu > 1) ? "s" : "");
 +                    }
 +                }
 +                else
 +                {
 +                    /* Avoid duplicate error messages.
 +                     * Unfortunately we can only do this at the physical node
 +                     * level, since the hardware setup and MPI process count
 +                     * might be differ over physical nodes.
 +                     */
 +                    if (cr->rank_pp_intranode == 0)
 +                    {
 +                        gmx_fatal(FARGS,
 +                                  "Incorrect launch configuration: mismatching number of PP %s%s and GPUs%s.\n"
 +                                  "%s was started with %d PP %s%s%s, but only %d GPU%s were detected.",
 +                                  th_or_proc, btMPI ? "s" : "es" , pernode,
 +                                  ShortProgram(), npppn, th_or_proc, th_or_proc_plural, pernode, ngpu, gpu_plural);
 +                    }
 +#ifdef GMX_MPI
 +                    else
 +                    {
 +                        /* Avoid other ranks to continue after inconsistency */
 +                        MPI_Barrier(cr->mpi_comm_mygroup);
 +                    }
 +#endif
 +                }
 +            }
 +        }
 +
 +        if (hwinfo->gpu_info.bUserSet && (cr->rank_pp_intranode == 0))
 +        {
 +            int i, j, same_count;
 +            gmx_bool bSomeSame, bAllDifferent;
 +
 +            same_count = 0;
 +            bSomeSame = FALSE;
 +            bAllDifferent = TRUE;
 +
 +            for (i = 0; i < ngpu - 1; i++)
 +            {
 +                for (j = i + 1; j < ngpu; j++)
 +                {
 +                    bSomeSame       |= hwinfo->gpu_info.cuda_dev_use[i] == hwinfo->gpu_info.cuda_dev_use[j];
 +                    bAllDifferent   &= hwinfo->gpu_info.cuda_dev_use[i] != hwinfo->gpu_info.cuda_dev_use[j];
 +                    same_count      += hwinfo->gpu_info.cuda_dev_use[i] == hwinfo->gpu_info.cuda_dev_use[j];
 +                }
 +            }
 +
 +            if (btMPI && !bAllDifferent)
 +            {
 +                gmx_fatal(FARGS,
 +                          "Invalid GPU assignment: can't share a GPU among multiple thread-MPI threads.\n"
 +                          "Use MPI if you are sure that you want to assign GPU to multiple threads.");
 +            }
 +
 +            if (bSomeSame)
 +            {
 +                md_print_warn(cr,fplog,
 +                              "NOTE: Potentially sub-optimal launch configuration: you assigned %s to\n"
 +                              "      multiple %s%s; this should be avoided as it generally\n"
 +                              "      causes performance loss.",
 +                              same_count > 1 ? "GPUs" : "a GPU", th_or_proc, btMPI ? "s" : "es");
 +            }
 +        }
 +        print_gpu_use_stats(fplog, &hwinfo->gpu_info, cr);
 +    }
 +}
 +
 +/* Return the number of hardware threads supported by the current CPU.
 + * We assume that this is equal with the number of CPUs reported to be
 + * online by the OS at the time of the call.
 + */
 +static int get_nthreads_hw_avail(FILE *fplog, const t_commrec *cr)
 +{
 +     int ret = 0;
 +
 +#if ((defined(WIN32) || defined( _WIN32 ) || defined(WIN64) || defined( _WIN64 )) && !(defined (__CYGWIN__) || defined (__CYGWIN32__)))
 +    /* Windows */
 +    SYSTEM_INFO sysinfo;
 +    GetSystemInfo( &sysinfo );
 +    ret = sysinfo.dwNumberOfProcessors;
 +#elif defined HAVE_SYSCONF
 +    /* We are probably on Unix.
 +     * Now check if we have the argument to use before executing the call
 +     */
 +#if defined(_SC_NPROCESSORS_ONLN)
 +    ret = sysconf(_SC_NPROCESSORS_ONLN);
 +#elif defined(_SC_NPROC_ONLN)
 +    ret = sysconf(_SC_NPROC_ONLN);
 +#elif defined(_SC_NPROCESSORS_CONF)
 +    ret = sysconf(_SC_NPROCESSORS_CONF);
 +#elif defined(_SC_NPROC_CONF)
 +    ret = sysconf(_SC_NPROC_CONF);
 +#endif /* End of check for sysconf argument values */
 +
 +#else
 +    /* Neither windows nor Unix. No fscking idea how many CPUs we have! */
 +    ret = -1;
 +#endif
 +
 +    if (debug)
 +    {
 +        fprintf(debug, "Detected %d processors, will use this as the number "
 +                "of supported hardware threads.\n", ret);
 +    }
 +
 +#ifdef GMX_OMPENMP
 +    if (ret != gmx_omp_get_num_procs())
 +    {
 +        md_print_warn(cr, fplog,
 +                      "Number of CPUs detected (%d) does not match the number reported by OpenMP (%d).\n"
 +                      "Consider setting the launch configuration manually!",
 +                      ret, gmx_omp_get_num_procs());
 +    }
 +#endif
 +
 +    return ret;
 +}
 +
 +void gmx_detect_hardware(FILE *fplog, gmx_hw_info_t *hwinfo,
 +                         const t_commrec *cr,
 +                         gmx_bool bForceUseGPU, gmx_bool bTryUseGPU,
 +                         const char *gpu_id)
 +{
 +    int             i;
 +    const char      *env;
 +    char            sbuf[STRLEN], stmp[STRLEN];
 +    gmx_hw_info_t   *hw;
 +    gmx_gpu_info_t  gpuinfo_auto, gpuinfo_user;
 +    gmx_bool        bGPUBin;
 +
 +    assert(hwinfo);
 +
 +    /* detect CPUID info; no fuss, we don't detect system-wide
 +     * -- sloppy, but that's it for now */
 +    if (gmx_cpuid_init(&hwinfo->cpuid_info) != 0)
 +    {
 +        gmx_fatal_collective(FARGS, cr, NULL, "CPUID detection failed!");
 +    }
 +
 +    /* detect number of hardware threads */
 +    hwinfo->nthreads_hw_avail = get_nthreads_hw_avail(fplog, cr);
 +
 +    /* detect GPUs */
 +    hwinfo->gpu_info.ncuda_dev_use  = 0;
 +    hwinfo->gpu_info.cuda_dev_use   = NULL;
 +    hwinfo->gpu_info.ncuda_dev      = 0;
 +    hwinfo->gpu_info.cuda_dev       = NULL;
 +
 +#ifdef GMX_GPU
 +    bGPUBin      = TRUE;
 +#else
 +    bGPUBin      = FALSE;
 +#endif
 +
 +    /* Bail if binary is not compiled with GPU acceleration, but this is either
 +     * explicitly (-nb gpu) or implicitly (gpu ID passed) requested. */
 +    if (bForceUseGPU && !bGPUBin)
 +    {
 +        gmx_fatal(FARGS, "GPU acceleration requested, but %s was compiled without GPU support!", ShortProgram());
 +    }
 +    if (gpu_id != NULL && !bGPUBin)
 +    {
 +        gmx_fatal(FARGS, "GPU ID string set, but %s was compiled without GPU support!", ShortProgram());
 +    }
 +
 +    /* run the detection if the binary was compiled with GPU support */
 +    if (bGPUBin && getenv("GMX_DISABLE_GPU_DETECTION")==NULL)
 +    {
 +        char detection_error[STRLEN];
 +
 +        if (detect_cuda_gpus(&hwinfo->gpu_info, detection_error) != 0)
 +        {
 +            if (detection_error != NULL && detection_error[0] != '\0')
 +            {
 +                sprintf(sbuf, ":\n      %s\n", detection_error);
 +            }
 +            else
 +            {
 +                sprintf(sbuf, ".");
 +            }
 +            md_print_warn(cr, fplog,
 +                          "NOTE: Error occurred during GPU detection%s"
 +                          "      Can not use GPU acceleration, will fall back to CPU kernels.\n",
 +                          sbuf);
 +        }
 +    }
 +
 +    if (bForceUseGPU || bTryUseGPU)
 +    {
 +        env = getenv("GMX_GPU_ID");
 +        if (env != NULL && gpu_id != NULL)
 +        {
 +            gmx_fatal(FARGS,"GMX_GPU_ID and -gpu_id can not be used at the same time");
 +        }
 +        if (env == NULL)
 +        {
 +            env = gpu_id;
 +        }
 +
 +        /* parse GPU IDs if the user passed any */
 +        if (env != NULL)
 +        {
 +            int *gpuid, *checkres;
 +            int nid, res;
 +
 +            snew(gpuid, max_gpu_ids_user);
 +            snew(checkres, max_gpu_ids_user);
 +
 +            parse_gpu_id_plain_string(env, &nid, gpuid);
 +
 +            if (nid == 0)
 +            {
 +                gmx_fatal(FARGS, "Empty GPU ID string encountered.\n%s\n", invalid_gpuid_hint);
 +            }
 +
 +            res = check_select_cuda_gpus(checkres, &hwinfo->gpu_info, gpuid, nid);
 +
 +            if (!res)
 +            {
 +                print_gpu_detection_stats(fplog, &hwinfo->gpu_info, cr);
 +
 +                sprintf(sbuf, "Some of the requested GPUs do not exist, behave strangely, or are not compatible:\n");
 +                for (i = 0; i < nid; i++)
 +                {
 +                    if (checkres[i] != egpuCompatible)
 +                    {
 +                        sprintf(stmp, "    GPU #%d: %s\n",
 +                                gpuid[i], gpu_detect_res_str[checkres[i]]);
 +                        strcat(sbuf, stmp);
 +                    }
 +                }
 +                gmx_fatal(FARGS, "%s", sbuf);
 +            }
 +
 +            hwinfo->gpu_info.bUserSet = TRUE;
 +
 +            sfree(gpuid);
 +            sfree(checkres);
 +        }
 +        else
 +        {
 +            pick_compatible_gpus(&hwinfo->gpu_info);
 +            hwinfo->gpu_info.bUserSet = FALSE;
 +        }
 +
 +        /* decide whether we can use GPU */
 +        hwinfo->bCanUseGPU = (hwinfo->gpu_info.ncuda_dev_use > 0);
 +        if (!hwinfo->bCanUseGPU && bForceUseGPU)
 +        {
 +            gmx_fatal(FARGS, "GPU acceleration requested, but no compatible GPUs were detected.");
 +        }
 +    }
 +}
 +
 +void limit_num_gpus_used(gmx_hw_info_t *hwinfo, int count)
 +{
 +    int ndev_use;
 +
 +    assert(hwinfo);
 +
 +    ndev_use = hwinfo->gpu_info.ncuda_dev_use;
 +
 +    if (count > ndev_use)
 +    {
 +        /* won't increase the # of GPUs */
 +        return;
 +    }
 +
 +    if (count < 1)
 +    {
 +        char sbuf[STRLEN];
 +        sprintf(sbuf, "Limiting the number of GPUs to <1 doesn't make sense (detected %d, %d requested)!",
 +                ndev_use, count);
 +        gmx_incons(sbuf);
 +    }
 +
 +    /* TODO: improve this implementation: either sort GPUs or remove the weakest here */
 +    hwinfo->gpu_info.ncuda_dev_use = count;
 +}
 +
 +void gmx_hardware_info_free(gmx_hw_info_t *hwinfo)
 +{
 +    if (hwinfo)
 +    {
 +        gmx_cpuid_done(hwinfo->cpuid_info);
 +        free_gpu_info(&hwinfo->gpu_info);
 +        sfree(hwinfo);
 +    }
 +}
index 67a6e207c731536e4e83d22d8e4e195f8be965dc,0000000000000000000000000000000000000000..673025b7697dad412210da03097c2329f1b1cb78
mode 100644,000000..100644
--- /dev/null
@@@ -1,447 -1,0 +1,447 @@@
-     if (SIMMASTER(cr) && bOMP)
 +/* -*- mode: c; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4; c-file-style: "stroustrup"; -*-
 + *
 + * 
 + *                This source code is part of
 + * 
 + *                 G   R   O   M   A   C   S
 + * 
 + *          GROningen MAchine for Chemical Simulations
 + * 
 + * Written by David van der Spoel, Erik Lindahl, Berk Hess, and others.
 + * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 + * Copyright (c) 2001-2010, The GROMACS development team,
 + * check out http://www.gromacs.org for more information.
 +
 + * This program is free software; you can redistribute it and/or
 + * modify it under the terms of the GNU General Public License
 + * as published by the Free Software Foundation; either version 2
 + * of the License, or (at your option) any later version.
 + * 
 + * If you want to redistribute modifications, please consider that
 + * scientific software is very special. Version control is crucial -
 + * bugs must be traceable. We will be happy to consider code for
 + * inclusion in the official distribution, but derived work must not
 + * be called official GROMACS. Details are found in the README & COPYING
 + * files - if they are missing, get the official version at www.gromacs.org.
 + * 
 + * To help us fund GROMACS development, we humbly ask that you cite
 + * the papers on the package - you can find them in the top README file.
 + * 
 + * For more info, check our website at http://www.gromacs.org
 + * 
 + * And Hey:
 + * Gallium Rubidium Oxygen Manganese Argon Carbon Silicon
 + */
 +
 +#include <stdio.h>
 +#include <stdlib.h>
 +#include <string.h>
 +#include <assert.h>
 +
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include "gmx_fatal.h"
 +#include "typedefs.h"
 +#include "macros.h"
 +#include "network.h"
 +#include "statutil.h"
 +#include "gmx_omp.h"
 +#include "gmx_omp_nthreads.h"
 +#include "md_logging.h"
 +
 +/** Structure with the number of threads for each OpenMP multi-threaded
 + *  algorithmic module in mdrun. */
 +typedef struct
 +{
 +    int gnth;               /**< Global num. of threads per PP or PP+PME process/tMPI thread. */
 +    int gnth_pme;           /**< Global num. of threads per PME only process/tMPI thread. */
 +
 +    int nth[emntNR];        /**< Number of threads for each module, indexed with module_nth_t */
 +    gmx_bool initialized;   /**< TRUE if the module as been initialized. */
 +} omp_module_nthreads_t;
 +
 +/** Names of environment variables to set the per module number of threads.
 + *
 + *  Indexed with the values of module_nth_t.
 + * */
 +static const char *modth_env_var[emntNR] =
 +{
 +    "GMX_DEFAULT_NUM_THREADS should never be set",
 +    "GMX_DOMDEC_NUM_THREADS", "GMX_PAIRSEARCH_NUM_THREADS",
 +    "GMX_NONBONDED_NUM_THREADS", "GMX_BONDED_NUM_THREADS",
 +    "GMX_PME_NUM_THREADS", "GMX_UPDATE_NUM_THREADS",
 +    "GMX_VSITE_NUM_THREADS",
 +    "GMX_LINCS_NUM_THREADS", "GMX_SETTLE_NUM_THREADS"
 +};
 +
 +/** Names of the modules. */
 +static const char *mod_name[emntNR] =
 +{
 +    "default", "domain decomposition", "pair search", "non-bonded",
 +    "bonded", "PME", "update", "LINCS", "SETTLE"
 +};
 +
 +/** Number of threads for each algorithmic module.
 + *
 + *  File-scope global variable that gets set once in \init_module_nthreads
 + *  and queried via gmx_omp_nthreads_get.
 + *
 + *  All fields are initialized to 0 which should result in errors if
 + *  the init call is omitted.
 + * */
 +static omp_module_nthreads_t modth = { 0, 0, {0, 0, 0, 0, 0, 0, 0, 0, 0}, FALSE};
 +
 +
 +/** Determine the number of threads for module \mod.
 + *
 + *  \m takes values form the module_nth_t enum and maps these to the
 + *  corresponding value in modth_env_var.
 + *
 + *  Each number of threads per module takes the default value unless
 + *  GMX_*_NUM_THERADS env var is set, case in which its value overrides
 + *  the deafult.
 + *
 + *  The "group" scheme supports OpenMP only in PME and in thise case all but
 + *  the PME nthread values default to 1.
 + */
 +static int pick_module_nthreads(FILE *fplog, int m,
 +                                gmx_bool bSimMaster,
 +                                gmx_bool bFullOmpSupport,
 +                                gmx_bool bSepPME)
 +{
 +    char *env;
 +    int  nth;
 +    char sbuf[STRLEN];
 +    gmx_bool bOMP;
 +
 +#ifdef GMX_OPENMP
 +    bOMP = TRUE;
 +#else
 +    bOMP = FALSE;
 +#endif /* GMX_OPENMP */
 +
 +    /* The default should never be set through a GMX_*_NUM_THREADS env var
 +     * as it's always equal with gnth. */
 +    if (m == emntDefault)
 +    {
 +        return modth.nth[emntDefault];
 +    }
 +
 +    /* check the environment variable */
 +    if ((env = getenv(modth_env_var[m])) != NULL)
 +    {
 +        sscanf(env, "%d", &nth);
 +
 +        if (!bOMP)
 +        {
 +            gmx_warning("%s=%d is set, but %s is compiled without OpenMP!",
 +                        modth_env_var[m], nth, ShortProgram());
 +        }
 +
 +        /* with the verlet codepath, when any GMX_*_NUM_THREADS env var is set,
 +         * OMP_NUM_THREADS also has to be set */
 +        if (bFullOmpSupport && getenv("OMP_NUM_THREADS") == NULL)
 +        {
 +            gmx_fatal(FARGS, "%s=%d is set, the default number of threads also "
 +                      "needs to be set with OMP_NUM_THREADS!",
 +                      modth_env_var[m], nth);
 +        }
 +
 +        /* with the group scheme warn if any env var except PME is set */
 +        if (!bFullOmpSupport)
 +        {
 +            if (m != emntPME)
 +            {
 +                gmx_warning("%s=%d is set, but OpenMP multithreading is not "
 +                            "supported in %s!",
 +                            modth_env_var[m], nth, mod_name[m]);
 +                nth = 1;
 +            }
 +        }
 +
 +        /* only babble if we are really overriding with a different value */
 +        if ((bSepPME && m == emntPME && nth != modth.gnth_pme) || (nth != modth.gnth))
 +        {
 +            sprintf(sbuf, "%s=%d set, overriding the default number of %s threads",
 +                    modth_env_var[m], nth, mod_name[m]);
 +            if (bSimMaster)
 +            {
 +                fprintf(stderr, "\n%s\n", sbuf);
 +            }
 +            if (fplog)
 +            {
 +                fprintf(fplog, "%s\n", sbuf);
 +            }
 +        }
 +    }
 +    else
 +    {
 +        /* pick the global PME node nthreads if we are setting the number
 +         * of threads in separate PME nodes  */
 +        nth = (bSepPME && m == emntPME) ? modth.gnth_pme : modth.gnth;
 +    }
 +
 +    return modth.nth[m] = nth;
 +}
 +
 +void gmx_omp_nthreads_read_env(int *nthreads_omp)
 +{
 +    char *env;
 +
 +    assert(nthreads_omp);
 +
 +    if ((env = getenv("OMP_NUM_THREADS")) != NULL)
 +    {
 +        int nt_omp;
 +
 +        sscanf(env,"%d",&nt_omp);
 +        if (nt_omp <= 0)
 +        {
 +            gmx_fatal(FARGS,"OMP_NUM_THREADS is invalid: '%s'",env);
 +        }
 +
 +        if (*nthreads_omp > 0 && nt_omp != *nthreads_omp)
 +        {
 +            gmx_fatal(FARGS,"OMP_NUM_THREADS (%d) and the number of threads requested on the command line (%d) have different values",nt_omp,*nthreads_omp);
 +        }
 +
 +        /* Setting the number of OpenMP threads.
 +         * NOTE: with tMPI this function is only called on the master node,
 +         * but with MPI on all nodes which means lots of messages on stderr.
 +         */
 +        fprintf(stderr,"Getting the number of OpenMP threads from OMP_NUM_THREADS: %d\n",nt_omp);
 +        *nthreads_omp = nt_omp;
 +    }
 +}
 +
 +void gmx_omp_nthreads_init(FILE *fplog, t_commrec *cr,
 +                           int nthreads_hw_avail,
 +                           int omp_nthreads_req,
 +                           int omp_nthreads_pme_req,
 +                           gmx_bool bThisNodePMEOnly,
 +                           gmx_bool bFullOmpSupport)
 +{
 +    int  nth, nth_pmeonly, gmx_maxth, nppn;
 +    char *env;
 +    gmx_bool bSepPME, bOMP;
 +
 +#ifdef GMX_OPENMP
 +    bOMP = TRUE;
 +#else
 +    bOMP = FALSE;
 +#endif /* GMX_OPENMP */
 +
 +    /* number of MPI processes/threads per physical node */
 +    nppn = cr->nrank_intranode;
 +
 +    bSepPME = ( (cr->duty & DUTY_PP) && !(cr->duty & DUTY_PME)) ||
 +              (!(cr->duty & DUTY_PP) &&  (cr->duty & DUTY_PME));
 +
 +#ifdef GMX_THREAD_MPI
 +    /* modth is shared among tMPI threads, so for thread safety do the
 +     * detection is done on the master only. It is not thread-safe with
 +     * multiple simulations, but that's anyway not supported by tMPI. */
 +    if (SIMMASTER(cr))
 +#endif
 +    {
 +        /* just return if the initialization has already been done */
 +        if (modth.initialized)
 +        {
 +            return;
 +        }
 +
 +        /* With full OpenMP support (verlet scheme) set the number of threads
 +         * per process / default:
 +         * - 1 if not compiled with OpenMP or
 +         * - OMP_NUM_THREADS if the env. var is set, or
 +         * - omp_nthreads_req = #of threads requested by the user on the mdrun
 +         *   command line, otherwise
 +         * - take the max number of available threads and distribute them
 +         *   on the processes/tMPI threads.
 +         * ~ The GMX_*_NUM_THREADS env var overrides the number of threads of
 +         *   the respective module and it has to be used in conjunction with
 +         *   OMP_NUM_THREADS.
 +         *
 +         * With the group scheme OpenMP multithreading is only supported in PME,
 +         * for all other modules nthreads is set to 1.
 +         * The number of PME threads is equal to:
 +         * - 1 if not compiled with OpenMP or
 +         * - GMX_PME_NUM_THREADS if defined, otherwise
 +         * - OMP_NUM_THREADS if defined, otherwise
 +         * - 1
 +         */
 +        nth = 1;
 +        if ((env = getenv("OMP_NUM_THREADS")) != NULL)
 +        {
 +            if (!bOMP && (strncmp(env, "1", 1) != 0))
 +            {
 +                gmx_warning("OMP_NUM_THREADS is set, but %s was compiled without OpenMP support!",
 +                            ShortProgram());
 +            }
 +            else
 +            {
 +                nth = gmx_omp_get_max_threads();
 +            }
 +        }
 +        else if (omp_nthreads_req > 0)
 +        {
 +            nth = omp_nthreads_req;
 +        }
 +        else if (bFullOmpSupport && bOMP)
 +        {
 +            /* max available threads per node */
 +            nth = nthreads_hw_avail;
 +
 +            /* divide the threads among the MPI processes/tMPI threads */
 +            if (nth >= nppn)
 +            {
 +                nth /= nppn;
 +            }
 +            else
 +            {
 +                nth = 1;
 +            }
 +        }
 +
 +        /* now we have the global values, set them:
 +         * - 1 if not compiled with OpenMP and for the group scheme
 +         * - nth for the verlet scheme when compiled with OpenMP
 +         */
 +        if (bFullOmpSupport && bOMP)
 +        {
 +            modth.gnth = nth;
 +        }
 +        else
 +        {
 +            modth.gnth = 1;
 +        }
 +
 +        if (bSepPME)
 +        {
 +            if (omp_nthreads_pme_req > 0)
 +            {
 +                modth.gnth_pme = omp_nthreads_pme_req;
 +            }
 +            else
 +            {
 +                modth.gnth_pme = nth;
 +            }
 +        }
 +        else
 +        {
 +            modth.gnth_pme = 0;
 +        }
 +
 +        /* now set the per-module values */
 +        modth.nth[emntDefault] = modth.gnth;
 +        pick_module_nthreads(fplog, emntDomdec, SIMMASTER(cr), bFullOmpSupport, bSepPME);
 +        pick_module_nthreads(fplog, emntPairsearch, SIMMASTER(cr), bFullOmpSupport, bSepPME);
 +        pick_module_nthreads(fplog, emntNonbonded, SIMMASTER(cr), bFullOmpSupport, bSepPME);
 +        pick_module_nthreads(fplog, emntBonded, SIMMASTER(cr), bFullOmpSupport, bSepPME);
 +        pick_module_nthreads(fplog, emntPME, SIMMASTER(cr), bFullOmpSupport, bSepPME);
 +        pick_module_nthreads(fplog, emntUpdate, SIMMASTER(cr), bFullOmpSupport, bSepPME);
 +        pick_module_nthreads(fplog, emntVSITE, SIMMASTER(cr), bFullOmpSupport, bSepPME);
 +        pick_module_nthreads(fplog, emntLINCS, SIMMASTER(cr), bFullOmpSupport, bSepPME);
 +        pick_module_nthreads(fplog, emntSETTLE, SIMMASTER(cr), bFullOmpSupport, bSepPME);
 +
 +        /* set the number of threads globally */
 +        if (bOMP)
 +        {
 +#ifndef GMX_THREAD_MPI
 +            if (bThisNodePMEOnly)
 +            {
 +                gmx_omp_set_num_threads(modth.gnth_pme);
 +            }
 +            else
 +#endif /* GMX_THREAD_MPI */
 +            {
 +                if (bFullOmpSupport)
 +                {
 +                    gmx_omp_set_num_threads(nth);
 +                }
 +                else
 +                {
 +                    gmx_omp_set_num_threads(1);
 +                }
 +            }
 +        }
 +
 +        modth.initialized = TRUE;
 +    }
 +#ifdef GMX_THREAD_MPI
 +    /* Non-master threads have to wait for the detection to be done. */
 +    if (PAR(cr))
 +    {
 +        MPI_Barrier(cr->mpi_comm_mysim);
 +    }
 +#endif
 +
 +    /* inform the user about the settings */
-             fprintf(stderr, "Using %d OpenMP thread%s %s\n",
-                     modth.gnth,modth.gnth > 1 ? "s" : "",
-                     cr->nnodes > 1 ? mpi_str : "");
++    if (bOMP)
 +    {
 +#ifdef GMX_THREAD_MPI
 +        const char *mpi_str="per tMPI thread";
 +#else
 +        const char *mpi_str="per MPI process";
 +#endif
 +
 +        /* for group scheme we print PME threads info only */
 +        if (bFullOmpSupport)
 +        {
-             fprintf(stderr, "Using %d OpenMP thread%s %s for PME\n",
-                     modth.gnth_pme,modth.gnth_pme > 1 ? "s" : "",
-                     cr->nnodes > 1 ? mpi_str : "");
++            md_print_info(cr, fplog, "Using %d OpenMP thread%s %s\n",
++                          modth.gnth,modth.gnth > 1 ? "s" : "",
++                          cr->nnodes > 1 ? mpi_str : "");
 +        }
 +        if (bSepPME && modth.gnth_pme != modth.gnth)
 +        {
++            md_print_info(cr, fplog, "Using %d OpenMP thread%s %s for PME\n",
++                          modth.gnth_pme,modth.gnth_pme > 1 ? "s" : "",
++                          cr->nnodes > 1 ? mpi_str : "");
 +        }
 +    }
 +
 +    /* detect and warn about oversubscription
 +     * TODO: enable this for separate PME nodes as well! */
 +    if (!bSepPME && cr->rank_pp_intranode == 0)
 +    {
 +        char sbuf[STRLEN], sbuf1[STRLEN], sbuf2[STRLEN];
 +
 +        if (modth.gnth*nppn > nthreads_hw_avail)
 +        {
 +            sprintf(sbuf, "threads");
 +            sbuf1[0] = '\0';
 +            sprintf(sbuf2, "O");
 +#ifdef GMX_MPI
 +            if (modth.gnth == 1)
 +            {
 +#ifdef GMX_THREAD_MPI
 +                sprintf(sbuf, "thread-MPI threads");
 +#else
 +                sprintf(sbuf, "MPI processes");
 +                sprintf(sbuf1, " per node");
 +                sprintf(sbuf2, "On node %d: o", cr->sim_nodeid);
 +#endif
 +            }
 +#endif
 +            md_print_warn(cr, fplog,
 +                          "WARNING: %sversubscribing the available %d logical CPU cores%s with %d %s.\n"
 +                          "         This will cause considerable performance loss!",
 +                          sbuf2, nthreads_hw_avail, sbuf1, nppn*modth.gnth, sbuf);
 +        }
 +    }
 +}
 +
 +int gmx_omp_nthreads_get(int mod)
 +{
 +    if (mod < 0 || mod >= emntNR)
 +    {
 +        /* invalid module queried */
 +        return -1;
 +    }
 +    else
 +    {
 +        return modth.nth[mod];
 +    }
 +}
index d99803d8287c5538ed9c6eb500aedd75d87ce5ac,0000000000000000000000000000000000000000..d5473499fa268b6a7c1f06e5a96d2de0d075544f
mode 100644,000000..100644
--- /dev/null
@@@ -1,180 -1,0 +1,180 @@@
-   def_bonded  ("PIDIHS",   "Improper Dih.",   4, 3, 3,  eNR_PROPER, pdihs         ),
 +/*
 + * 
 + *                This source code is part of
 + * 
 + *                 G   R   O   M   A   C   S
 + * 
 + *          GROningen MAchine for Chemical Simulations
 + * 
 + *                        VERSION 3.2.0
 + * Written by David van der Spoel, Erik Lindahl, Berk Hess, and others.
 + * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 + * Copyright (c) 2001-2004, The GROMACS development team,
 + * check out http://www.gromacs.org for more information.
 +
 + * This program is free software; you can redistribute it and/or
 + * modify it under the terms of the GNU General Public License
 + * as published by the Free Software Foundation; either version 2
 + * of the License, or (at your option) any later version.
 + * 
 + * If you want to redistribute modifications, please consider that
 + * scientific software is very special. Version control is crucial -
 + * bugs must be traceable. We will be happy to consider code for
 + * inclusion in the official distribution, but derived work must not
 + * be called official GROMACS. Details are found in the README & COPYING
 + * files - if they are missing, get the official version at www.gromacs.org.
 + * 
 + * To help us fund GROMACS development, we humbly ask that you cite
 + * the papers on the package - you can find them in the top README file.
 + * 
 + * For more info, check our website at http://www.gromacs.org
 + * 
 + * And Hey:
 + * GROningen Mixture of Alchemy and Childrens' Stories
 + */
 +/* This file is completely threadsafe - keep it that way! */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +
 +#include "typedefs.h"
 +#include "bondf.h"
 +#include "disre.h"
 +#include "orires.h"
 +#include "genborn.h"
 +
 +
 +#define  def_bonded(str,lstr,nra,nrpa,nrpb,ind,func)\
 +   {str,lstr,(nra),(nrpa),(nrpb),IF_BOND,                        (ind),(func)}
 +
 +#define  def_bondedz(str,lstr,nra,nrpa,nrpb,ind,func)\
 +   {str,lstr,(nra),(nrpa),(nrpb),IF_BOND | IF_LIMZERO,           (ind),(func)}
 +
 +#define  def_bondedt(str,lstr,nra,nrpa,nrpb,ind,func)\
 +   {str,lstr,(nra),(nrpa),(nrpb),IF_BOND | IF_TABULATED,         (ind),(func)}
 +
 +#define  def_bondedtz(str,lstr,nra,nrpa,nrpb,ind,func)\
 +   {str,lstr,(nra),(nrpa),(nrpb),IF_BOND | IF_TABULATED | IF_LIMZERO,(ind),(func)}
 +
 +#define   def_angle(str,lstr,nra,nrpa,nrpb,ind,func)\
 +   {str,lstr,(nra),(nrpa),(nrpb),IF_BOND | IF_ATYPE,(ind),(func)}
 +   
 +#define    def_bond(str,lstr,nra,nrpa,nrpb,ind,func)\
 +   {str,lstr,(nra),(nrpa),(nrpb),IF_BOND | IF_CHEMBOND | IF_BTYPE,(ind),(func)}
 +
 +#define    def_bondt(str,lstr,nra,nrpa,nrpb,ind,func)\
 +   {str,lstr,(nra),(nrpa),(nrpb),IF_BOND | IF_CHEMBOND | IF_TABULATED,(ind),(func)}
 +
 +#define  def_bondnb(str,lstr,nra,nrpa,nrpb,ind,func)\
 +   {str,lstr,(nra),(nrpa),(nrpb),IF_BOND | IF_CHEMBOND,(ind),(func)}
 +
 +#define   def_vsite(str,lstr,nra,nrpa)\
 +   {str,lstr,(nra),(nrpa),     0,IF_VSITE,                  -1, unimplemented}
 +
 +#define     def_shk(str,lstr,nra,nrpa,nrpb)\
 +   {str,lstr,(nra),(nrpa),(nrpb),IF_CONSTRAINT,             -1, unimplemented}
 +
 +#define   def_shkcb(str,lstr,nra,nrpa,nrpb)\
 +   {str,lstr,(nra),(nrpa),(nrpb),IF_CONSTRAINT | IF_CHEMBOND,-1, unimplemented}
 +   
 +#define      def_nb(str,lstr,nra, nrp)\
 +   {str,lstr,(nra), (nrp),     0,IF_NULL,                    -1,unimplemented}
 +   
 +#define    def_nofc(str,lstr)\
 +   {str,lstr,    0,     0,     0,IF_NULL,                    -1,unimplemented}
 +
 +/* this MUST correspond to the enum in include/types/idef.h */
 +const t_interaction_function interaction_function[F_NRE]=
 +{
 +  def_bond    ("BONDS",    "Bond",            2, 2, 2,  eNR_BONDS,  bonds         ),
 +  def_bond    ("G96BONDS", "G96Bond",         2, 2, 2,  eNR_BONDS,  g96bonds      ),
 +  def_bond    ("MORSE",    "Morse",           2, 3, 3,  eNR_MORSE,  morse_bonds   ),
 +  def_bond    ("CUBICBONDS","Cubic Bonds",    2, 3, 0,  eNR_CUBICBONDS, cubic_bonds),
 +  def_bondnb  ("CONNBONDS","Connect Bonds",   2, 0, 0,  0,      unimplemented     ),
 +  def_bonded  ("HARMONIC", "Harmonic Pot.",   2, 2, 2,  eNR_BONDS,  bonds         ),
 +  def_bondnb  ("FENEBONDS", "FENE Bonds",     2, 2, 0,  eNR_FENEBONDS, FENE_bonds ),
 +  def_bondt   ("TABBONDS", "Tab. Bonds",      2, 2, 2,  eNR_TABBONDS, tab_bonds   ),
 +  def_bondedtz("TABBONDSNC", "Tab. Bonds NC", 2, 2, 2,  eNR_TABBONDS, tab_bonds   ),
 +  def_bonded  ("RESTRAINTPOT", "Restraint Pot.", 2, 4, 4,  eNR_RESTRBONDS,  restraint_bonds ),
 +  def_angle   ("ANGLES",   "Angle",           3, 2, 2,  eNR_ANGLES, angles        ),
 +  def_angle   ("G96ANGLES","G96Angle",        3, 2, 2,  eNR_ANGLES, g96angles     ),
 +  def_angle   ("LINEAR_ANGLES", "Lin. Angle", 3, 2, 2,  eNR_LINEAR_ANGLES, linear_angles ),
 +  def_bonded  ("CROSS_BOND_BOND", "Bond-Cross", 3, 3, 0,0,          cross_bond_bond ),
 +  def_bonded  ("CROSS_BOND_ANGLE","BA-Cross",   3, 4, 0,0,          cross_bond_angle ),
 +  def_angle   ("UREY_BRADLEY","U-B",          3, 4, 4,  0,          urey_bradley ),
 +  def_angle   ("QANGLES","Quartic Angles",    3, 6, 0,  eNR_QANGLES, quartic_angles ),
 +  def_bondedt ("TABANGLES", "Tab. Angles",    3, 2, 2,  eNR_TABANGLES, tab_angles ),
 +  def_bonded  ("PDIHS",    "Proper Dih.",     4, 3, 3,  eNR_PROPER, pdihs         ),
 +  def_bonded  ("RBDIHS",   "Ryckaert-Bell.",  4, 6, 6,  eNR_RB, rbdihs            ),
 +  def_bonded  ("FOURDIHS", "Fourier Dih.",    4, 4, 4,  eNR_FOURDIH, rbdihs       ),
 +  def_bonded  ("IDIHS",    "Improper Dih.",   4, 2, 2,  eNR_IMPROPER,idihs        ),
++  def_bonded  ("PIDIHS",   "Improper Dih.",   4, 3, 3,  eNR_IMPROPER, pdihs       ),
 +  def_bondedt ("TABDIHS", "Tab. Dih.",        4, 2, 2,  eNR_TABDIHS, tab_dihs     ),
 +  def_bonded  ("CMAP",  "CMAP Dih.",          5, -1, -1,  eNR_CMAP,   unimplemented ),
 +  def_bonded  ("GB12",     "GB 1-2 Pol.",     2, 4, 0,  eNR_GB,     unimplemented ),
 +  def_bonded  ("GB13",     "GB 1-3 Pol.",     2, 4, 0,  eNR_GB,     unimplemented ),
 +  def_bonded  ("GB14",     "GB 1-4 Pol.",     2, 4, 0,  eNR_GB,     unimplemented ),
 +  def_nofc    ("GBPOL",    "GB Polarization" ),
 +  def_nofc    ("NPSOLVATION", "Nonpolar Sol." ),
 +  def_bondedz ("LJ14",     "LJ-14",           2, 2, 2,  eNR_NB14,   unimplemented ),
 +  def_nofc    ("COUL14",   "Coulomb-14"                                           ),
 +  def_bondedz ("LJC14_Q",  "LJC-14 q",        2, 5, 0,  eNR_NB14,   unimplemented ),
 +  def_bondedz ("LJC_NB",   "LJC Pairs NB",    2, 4, 0,  eNR_NB14,   unimplemented ),
 +  def_nb      ("LJ_SR",    "LJ (SR)",         2, 2                                ),
 +  def_nb      ("BHAM",     "Buck.ham (SR)",   2, 3                                ),
 +  def_nofc    ("LJ_LR",    "LJ (LR)"                                              ),
 +  def_nofc    ("BHAM_LR",  "Buck.ham (LR)"                                        ),
 +  def_nofc    ("DISPCORR", "Disper. corr."                                        ),
 +  def_nofc    ("COUL_SR",  "Coulomb (SR)"                                         ),
 +  def_nofc    ("COUL_LR",  "Coulomb (LR)"                                         ),
 +  def_nofc    ("RF_EXCL",  "RF excl."                                             ),
 +  def_nofc    ("COUL_RECIP", "Coul. recip."                                       ),
 +  def_nofc    ("DPD",      "DPD"                                                  ),
 +  def_bondnb  ("POLARIZATION", "Polarization",2, 1, 0,  0,          polarize      ),
 +  def_bonded  ("WATERPOL", "Water Pol.",      5, 6, 0,  eNR_WPOL,   water_pol     ),
 +  def_bonded  ("THOLE",    "Thole Pol.",      4, 3, 0,  eNR_THOLE,  thole_pol     ),
 +  def_bondnb  ("ANHARM_POL", "Anharm. Pol.",2, 3, 0, 0,          anharm_polarize      ),
 +  def_bonded  ("POSRES",   "Position Rest.",  1, 3, 3,  eNR_POSRES, unimplemented ),
 +  def_bonded  ("FBPOSRES","Flat-bottom posres", 1, 3, 0, eNR_FBPOSRES, unimplemented ),
 +  def_bonded  ("DISRES",   "Dis. Rest.",      2, 6, 0,  eNR_DISRES, ta_disres     ),
 +  def_nofc    ("DISRESVIOL",   "D.R.Viol. (nm)"                                       ),
 +  def_bonded  ("ORIRES",   "Orient. Rest.",   2, 6, 0,  eNR_ORIRES, orires        ),
 +  def_nofc    ("ORDEV",    "Ori. R. RMSD"                                         ),  
 +  def_bonded  ("ANGRES",   "Angle Rest.",     4, 3, 3,  eNR_ANGRES, angres        ),
 +  def_bonded  ("ANGRESZ",  "Angle Rest. Z",   2, 3, 3,  eNR_ANGRESZ,angresz       ),
 +  def_bonded  ("DIHRES",   "Dih. Rest.",      4, 3, 3,  eNR_DIHRES, dihres        ),
 +  def_nofc    ("DIHRESVIOL",  "Dih. Rest. Viol."                                     ), /* obsolete */
 +  def_shkcb   ("CONSTR",   "Constraint",      2, 1, 1                             ),
 +  def_shk     ("CONSTRNC", "Constr. No Conn.",2, 1, 1                             ),
 +  def_shkcb   ("SETTLE",   "Settle",          3, 2, 0                             ),
 +  def_vsite   ("VSITE2",   "Virtual site 2",  3, 1                                ),
 +  def_vsite   ("VSITE3",   "Virtual site 3",  4, 2                                ),
 +  def_vsite   ("VSITE3FD", "Virtual site 3fd",4, 2                                ),
 +  def_vsite   ("VSITE3FAD","Virtual site 3fad",4, 2                               ),
 +  def_vsite   ("VSITE3OUT","Virtual site 3out",4, 3                               ),
 +  def_vsite   ("VSITE4FD", "Virtual site 4fd", 5, 3                               ),
 +  def_vsite   ("VSITE4FDN","Virtual site 4fdn",5, 3                               ),
 +  def_vsite   ("VSITEN",   "Virtual site N",   2, 2                               ),
 +  def_nofc    ("COM_PULL", "COM Pull En."     ),
 +  def_nofc    ("EQM",      "Quantum En."      ),
 +  def_nofc    ("EPOT",     "Potential"        ),
 +  def_nofc    ("EKIN",     "Kinetic En."      ),
 +  def_nofc    ("ETOT",     "Total Energy"     ),
 +  def_nofc    ("ECONS",    "Conserved En."    ),
 +  def_nofc    ("TEMP",     "Temperature"      ),
 +  def_nofc    ("VTEMP",    "Vir. Temp. (not used)"      ),
 +  /* Note that pressure names can not be more than 8 char's,
 +   * because " (bar)" is appended to them.
 +   */
 +  def_nofc    ("PDISPCORR","Pres. DC"         ),  
 +  def_nofc    ("PRES",     "Pressure"         ),
 +  def_nofc    ("DH/DL_CON","dH/dl constr."    ), /* obsolete */
 +  def_nofc    ("DV/DL",    "dVremain/dl"      ),
 +  def_nofc    ("DK/DL",    "dEkin/dl"         ),
 +  def_nofc    ("DVC/DL",   "dVcoul/dl"        ),
 +  def_nofc    ("DVV/DL",   "dVvdw/dl"         ),
 +  def_nofc    ("DVB/DL",   "dVbonded/dl"      ),
 +  def_nofc    ("DVR/DL",   "dVrestraint/dl"   ),
 +  def_nofc    ("DVT/DL",   "dVtemperature/dl" )
 +};
index 7fb2314fdb64c2939a82096fc22b29540a0d91ed,0000000000000000000000000000000000000000..e41e2e977721dccdb8cec31de4c17791db946676
mode 100644,000000..100644
--- /dev/null
@@@ -1,877 -1,0 +1,880 @@@
-   if (at_start != 0) {
-     gmx_incons("In mk_graph_ilist at_start can not be != 0");
-   }
-   g->natoms = at_end;
 +/*
 + * 
 + *                This source code is part of
 + * 
 + *                 G   R   O   M   A   C   S
 + * 
 + *          GROningen MAchine for Chemical Simulations
 + * 
 + *                        VERSION 3.2.0
 + * Written by David van der Spoel, Erik Lindahl, Berk Hess, and others.
 + * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 + * Copyright (c) 2001-2004, The GROMACS development team,
 + * check out http://www.gromacs.org for more information.
 +
 + * This program is free software; you can redistribute it and/or
 + * modify it under the terms of the GNU General Public License
 + * as published by the Free Software Foundation; either version 2
 + * of the License, or (at your option) any later version.
 + * 
 + * If you want to redistribute modifications, please consider that
 + * scientific software is very special. Version control is crucial -
 + * bugs must be traceable. We will be happy to consider code for
 + * inclusion in the official distribution, but derived work must not
 + * be called official GROMACS. Details are found in the README & COPYING
 + * files - if they are missing, get the official version at www.gromacs.org.
 + * 
 + * To help us fund GROMACS development, we humbly ask that you cite
 + * the papers on the package - you can find them in the top README file.
 + * 
 + * For more info, check our website at http://www.gromacs.org
 + * 
 + * And Hey:
 + * GROningen Mixture of Alchemy and Childrens' Stories
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <string.h>
 +#include "smalloc.h"
 +#include "gmx_fatal.h"
 +#include "macros.h"
 +#include "vec.h"
 +#include "futil.h"
 +#include "copyrite.h"
 +#include "mshift.h"
 +#include "main.h"
 +#include "pbc.h"
 +
 +/************************************************************
 + *
 + *      S H I F T   U T I L I T I E S
 + *
 + ************************************************************/
 + 
 +
 +/************************************************************
 + *
 + *      G R A P H   G E N E R A T I O N    C O D E
 + *
 + ************************************************************/
 +
 +static void add_gbond(t_graph *g,atom_id a0,atom_id a1)
 +{
 +  int     i;
 +  atom_id inda0,inda1;
 +  gmx_bool    bFound;
 +
 +  inda0 = a0 - g->at_start;
 +  inda1 = a1 - g->at_start;
 +  bFound = FALSE;
 +  /* Search for a direct edge between a0 and a1.
 +   * All egdes are bidirectional, so we only need to search one way.
 +   */
 +  for(i=0; (i<g->nedge[inda0] && !bFound); i++) {
 +    bFound = (g->edge[inda0][i] == a1);
 +  }
 +
 +  if (!bFound) {
 +    g->edge[inda0][g->nedge[inda0]++] = a1;
 +    g->edge[inda1][g->nedge[inda1]++] = a0;
 +  }
 +}
 +
 +static void mk_igraph(t_graph *g,int ftype,t_ilist *il,
 +                    int at_start,int at_end,
 +                    int *part)
 +{
 +  t_iatom *ia;
 +  int     i,j,np;
 +  int     end;
 +
 +  end=il->nr;
 +  ia=il->iatoms;
 +
 +  i = 0;
 +  while (i < end) {
 +    np = interaction_function[ftype].nratoms;
 +    
 +    if (ia[1] >= at_start && ia[1] < at_end) {
 +      if (ia[np] >= at_end)
 +      gmx_fatal(FARGS,
 +                "Molecule in topology has atom numbers below and "
 +                "above natoms (%d).\n"
 +                "You are probably trying to use a trajectory which does "
 +                "not match the first %d atoms of the run input file.\n"
 +                "You can make a matching run input file with tpbconv.",
 +                at_end,at_end);
 +      if (ftype == F_SETTLE) {
 +      /* Bond all the atoms in the settle */
 +      add_gbond(g,ia[1],ia[2]);
 +      add_gbond(g,ia[1],ia[3]);
 +      } else if (part == NULL) {
 +      /* Simply add this bond */
 +      for(j=1; j<np; j++) {
 +        add_gbond(g,ia[j],ia[j+1]);
 +      }
 +      } else {
 +      /* Add this bond when it connects two unlinked parts of the graph */
 +      for(j=1; j<np; j++) {
 +        if (part[ia[j]] != part[ia[j+1]]) {
 +          add_gbond(g,ia[j],ia[j+1]);
 +        }
 +      }
 +      }
 +    }
 +    ia+=np+1;
 +    i+=np+1;
 +  }
 +}
 +
 +GMX_ATTRIBUTE_NORETURN static void g_error(int line,const char *file) 
 +{
 +  gmx_fatal(FARGS,"Tring to print non existant graph (file %s, line %d)",
 +            file,line);
 +}
 +#define GCHECK(g) if (g == NULL) g_error(__LINE__,__FILE__)
 +
 +void p_graph(FILE *log,const char *title,t_graph *g)
 +{
 +  int  i,j;
 +  const char *cc[egcolNR] = { "W", "G", "B" };
 +  
 +  GCHECK(g);
 +  fprintf(log,"graph:  %s\n",title);
 +  fprintf(log,"nnodes: %d\n",g->nnodes);
 +  fprintf(log,"nbound: %d\n",g->nbound);
 +  fprintf(log,"start:  %d\n",g->at_start);
 +  fprintf(log,"end:    %d\n",g->at_end);
 +  fprintf(log," atom shiftx shifty shiftz C nedg    e1    e2 etc.\n");
 +  for(i=0; (i<g->nnodes); i++)
 +    if (g->nedge[i] > 0) {
 +      fprintf(log,"%5d%7d%7d%7d %1s%5d",g->at_start+i+1,
 +            g->ishift[g->at_start+i][XX],
 +            g->ishift[g->at_start+i][YY],
 +            g->ishift[g->at_start+i][ZZ],
 +            (g->negc > 0) ? cc[g->egc[i]] : " ",
 +            g->nedge[i]);
 +      for(j=0; (j<g->nedge[i]); j++)
 +      fprintf(log," %5u",g->edge[i][j]+1);
 +      fprintf(log,"\n");
 +    }
 +  fflush(log);
 +}
 +
 +static void calc_1se(t_graph *g,int ftype,t_ilist *il,
 +                   int nbond[],int at_start,int at_end)
 +{
 +  int     k,nratoms,end,j;
 +  t_iatom *ia,iaa;
 +
 +  end=il->nr;
 +
 +  ia=il->iatoms;
 +  for(j=0; (j<end); j+=nratoms+1,ia+=nratoms+1) {
 +    nratoms = interaction_function[ftype].nratoms;
 +    
 +    if (ftype == F_SETTLE) {
 +      iaa          = ia[1];
 +      if (iaa >= at_start && iaa < at_end) {
 +      nbond[iaa]   += 2;
 +      nbond[ia[2]] += 1;
 +      nbond[ia[3]] += 1;
 +      g->at_start   = min(g->at_start,iaa);
 +      g->at_end     = max(g->at_end,iaa+2+1);
 +      }
 +    } else {
 +      for(k=1; (k<=nratoms); k++) {
 +      iaa=ia[k];
 +      if (iaa >= at_start && iaa < at_end) {
 +        g->at_start = min(g->at_start,iaa);
 +        g->at_end   = max(g->at_end,  iaa+1);
 +        /* When making the graph we (might) link all atoms in an interaction
 +         * sequentially. Therefore the end atoms add 1 to the count,
 +         * the middle atoms 2.
 +         */
 +        if (k == 1 || k == nratoms) {
 +          nbond[iaa] += 1;
 +        } else {
 +          nbond[iaa] += 2;
 +        }
 +      }
 +      }
 +    }
 +  }
 +}
 +
 +static int calc_start_end(FILE *fplog,t_graph *g,t_ilist il[],
 +                        int at_start,int at_end,
 +                        int nbond[])
 +{
 +  int   i,nnb,nbtot;
 +  
 +  g->at_start = at_end;
 +  g->at_end   = 0;
 +
 +  /* First add all the real bonds: they should determine the molecular 
 +   * graph.
 +   */
 +  for(i=0; (i<F_NRE); i++)
 +    if (interaction_function[i].flags & IF_CHEMBOND)
 +      calc_1se(g,i,&il[i],nbond,at_start,at_end);
 +  /* Then add all the other interactions in fixed lists, but first
 +   * check to see what's there already.
 +   */
 +  for(i=0; (i<F_NRE); i++) {
 +    if (!(interaction_function[i].flags & IF_CHEMBOND)) {
 +      calc_1se(g,i,&il[i],nbond,at_start,at_end);
 +    }
 +  }
 +  
 +  nnb   = 0;
 +  nbtot = 0;
 +  for(i=g->at_start; (i<g->at_end); i++) {
 +    nbtot += nbond[i];
 +    nnb    = max(nnb,nbond[i]);
 +  }
 +  if (fplog) {
 +    fprintf(fplog,"Max number of connections per atom is %d\n",nnb);
 +    fprintf(fplog,"Total number of connections is %d\n",nbtot);
 +  }
 +  return nbtot;
 +}
 +
 +
 +
 +static void compact_graph(FILE *fplog,t_graph *g)
 +{
 +  int i,j,n,max_nedge;
 +  atom_id *e;
 +
 +  max_nedge = 0;
 +  n = 0;
 +  for(i=0; i<g->nnodes; i++) {
 +    for(j=0; j<g->nedge[i]; j++) {
 +      g->edge[0][n++] = g->edge[i][j];
 +    }
 +    max_nedge = max(max_nedge,g->nedge[i]);
 +  }
 +  srenew(g->edge[0],n);
 +  /* set pointers after srenew because edge[0] might move */
 +  for(i=1; i<g->nnodes; i++) {
 +    g->edge[i] = g->edge[i-1] + g->nedge[i-1];
 +  }
 +
 +  if (fplog) {
 +    fprintf(fplog,"Max number of graph edges per atom is %d\n",
 +          max_nedge);
 +    fprintf(fplog,"Total number of graph edges is %d\n",n);
 +  }
 +}
 +
 +static gmx_bool determine_graph_parts(t_graph *g,int *part)
 +{
 +  int  i,e;
 +  int  nchanged;
 +  atom_id at_i,*at_i2;
 +  gmx_bool bMultiPart;
 +
 +  /* Initialize the part array with all entries different */
 +  for(at_i=g->at_start; at_i<g->at_end; at_i++) {
 +    part[at_i] = at_i;
 +  }
 +
 +  /* Loop over the graph until the part array is fixed */
 +  do {
 +    bMultiPart = FALSE;
 +    nchanged = 0;
 +    for(i=0; (i<g->nnodes); i++) {
 +      at_i  = g->at_start + i;
 +      at_i2 = g->edge[i];
 +      for(e=0; e<g->nedge[i]; e++) {
 +      /* Set part for both nodes to the minimum */
 +      if (part[at_i2[e]] > part[at_i]) {
 +        part[at_i2[e]] = part[at_i];
 +        nchanged++;
 +      } else if (part[at_i2[e]] < part[at_i]) {
 +        part[at_i] = part[at_i2[e]];
 +        nchanged++;
 +      }
 +      }
 +      if (part[at_i] != part[g->at_start]) {
 +      bMultiPart = TRUE;
 +      }
 +    }
 +    if (debug) {
 +      fprintf(debug,"graph part[] nchanged=%d, bMultiPart=%d\n",
 +            nchanged,bMultiPart);
 +    }
 +  } while (nchanged > 0);
 +
 +  return bMultiPart;
 +}
 +
 +void mk_graph_ilist(FILE *fplog,
 +                  t_ilist *ilist,int at_start,int at_end,
 +                  gmx_bool bShakeOnly,gmx_bool bSettle,
 +                  t_graph *g)
 +{
 +  int     *nbond;
 +  int     i,nbtot;
 +  gmx_bool    bMultiPart;
 +
-   snew(g->ishift,g->natoms);
++  /* The naming is somewhat confusing, but we need g->at0 and g->at1
++   * for shifthing coordinates to a new array (not in place) when
++   * some atoms are not connected by the graph, which runs from
++   * g->at_start (>= g->at0) to g->at_end (<= g->at1).
++   */
++  g->at0 = at_start;
++  g->at1 = at_end;
 +
 +  snew(nbond,at_end);
 +  nbtot = calc_start_end(fplog,g,ilist,at_start,at_end,nbond);
 +  
 +  if (g->at_start >= g->at_end) {
 +    g->at_start = at_start;
 +    g->at_end   = at_end;
 +    g->nnodes   = 0;
 +    g->nbound   = 0;
 +  }
 +  else {
 +    g->nnodes = g->at_end - g->at_start;
 +    snew(g->nedge,g->nnodes);
 +    snew(g->edge,g->nnodes);
 +    /* Allocate a single array and set pointers into it */
 +    snew(g->edge[0],nbtot);
 +    for(i=1; (i<g->nnodes); i++) {
 +      g->edge[i] = g->edge[i-1] + nbond[g->at_start+i-1];
 +    }
 +
 +    if (!bShakeOnly) {
 +      /* First add all the real bonds: they should determine the molecular 
 +       * graph.
 +       */
 +      for(i=0; (i<F_NRE); i++)
 +      if (interaction_function[i].flags & IF_CHEMBOND)
 +        mk_igraph(g,i,&(ilist[i]),at_start,at_end,NULL);
 +
 +      /* Determine of which separated parts the IF_CHEMBOND graph consists.
 +       * Store the parts in the nbond array.
 +       */
 +      bMultiPart = determine_graph_parts(g,nbond);
 +
 +      if (bMultiPart) {
 +      /* Then add all the other interactions in fixed lists,
 +       * but only when they connect parts of the graph
 +       * that are not connected through IF_CHEMBOND interactions.
 +       */      
 +      for(i=0; (i<F_NRE); i++) {
 +        if (!(interaction_function[i].flags & IF_CHEMBOND)) {
 +          mk_igraph(g,i,&(ilist[i]),at_start,at_end,nbond);
 +        }
 +      }
 +      }
 +      
 +      /* Removed all the unused space from the edge array */
 +      compact_graph(fplog,g);
 +    }
 +    else {
 +      /* This is a special thing used in splitter.c to generate shake-blocks */
 +      mk_igraph(g,F_CONSTR,&(ilist[F_CONSTR]),at_start,at_end,NULL);
 +      if (bSettle)
 +      mk_igraph(g,F_SETTLE,&(ilist[F_SETTLE]),at_start,at_end,NULL);
 +    }
 +    g->nbound = 0;
 +    for(i=0; (i<g->nnodes); i++)
 +      if (g->nedge[i] > 0)
 +        g->nbound++;
 +  }
 +
 +  g->negc = 0;
 +  g->egc = NULL;
 +
 +  sfree(nbond);
 +
-   for(i=0; (i<g->natoms); i++) {
++  snew(g->ishift,g->at1);
 +
 +  if (gmx_debug_at)
 +    p_graph(debug,"graph",g);
 +}
 +
 +t_graph *mk_graph(FILE *fplog,
 +                t_idef *idef,int at_start,int at_end,
 +                gmx_bool bShakeOnly,gmx_bool bSettle)
 +{
 +  t_graph *g;
 +
 +  snew(g,1);
 +
 +  mk_graph_ilist(fplog,idef->il,at_start,at_end,bShakeOnly,bSettle,g);
 +
 +  return g;
 +}
 +
 +void done_graph(t_graph *g)
 +{
 +  int i;
 +  
 +  GCHECK(g);
 +  if (g->nnodes > 0) {
 +    sfree(g->nedge);
 +    sfree(g->edge[0]);
 +    sfree(g->edge);
 +    sfree(g->egc);
 +  }
 +  sfree(g->ishift);
 +}
 +
 +/************************************************************
 + *
 + *      S H I F T   C A L C U L A T I O N   C O D E
 + *
 + ************************************************************/
 +
 +static void mk_1shift_tric(int npbcdim,matrix box,rvec hbox,
 +                         rvec xi,rvec xj,int *mi,int *mj)
 +{
 +  /* Calculate periodicity for triclinic box... */
 +  int  m,d;
 +  rvec dx;
 +  
 +  rvec_sub(xi,xj,dx);
 +
 +  mj[ZZ] = 0;
 +  for(m=npbcdim-1; (m>=0); m--) {
 +    /* If dx < hbox, then xj will be reduced by box, so that
 +     * xi - xj will be bigger
 +     */
 +    if (dx[m] < -hbox[m]) {
 +      mj[m]=mi[m]-1;
 +      for(d=m-1; d>=0; d--)
 +      dx[d]+=box[m][d];
 +    } else if (dx[m] >= hbox[m]) {
 +      mj[m]=mi[m]+1;
 +      for(d=m-1; d>=0; d--)
 +      dx[d]-=box[m][d];
 +    } else
 +      mj[m]=mi[m];
 +  }
 +}
 +
 +static void mk_1shift(int npbcdim,rvec hbox,rvec xi,rvec xj,int *mi,int *mj)
 +{
 +  /* Calculate periodicity for rectangular box... */
 +  int  m;
 +  rvec dx;
 +  
 +  rvec_sub(xi,xj,dx);
 +
 +  mj[ZZ] = 0;
 +  for(m=0; (m<npbcdim); m++) {
 +    /* If dx < hbox, then xj will be reduced by box, so that
 +     * xi - xj will be bigger
 +     */
 +    if (dx[m] < -hbox[m])
 +      mj[m]=mi[m]-1;
 +    else if (dx[m] >= hbox[m])
 +      mj[m]=mi[m]+1;
 +    else
 +      mj[m]=mi[m];
 +  }
 +}
 +
 +static void mk_1shift_screw(matrix box,rvec hbox,
 +                          rvec xi,rvec xj,int *mi,int *mj)
 +{
 +  /* Calculate periodicity for rectangular box... */
 +  int  signi,m;
 +  rvec dx;
 +
 +  if ((mi[XX] > 0 &&  mi[XX] % 2 == 1) ||
 +      (mi[XX] < 0 && -mi[XX] % 2 == 1)) {
 +    signi = -1;
 +  } else {
 +    signi =  1;
 +  }
 +
 +  rvec_sub(xi,xj,dx);
 +
 +  if (dx[XX] < -hbox[XX])
 +    mj[XX] = mi[XX] - 1;
 +  else if (dx[XX] >= hbox[XX])
 +    mj[XX] = mi[XX] + 1;
 +  else
 +    mj[XX] = mi[XX];
 +  if (mj[XX] != mi[XX]) {
 +    /* Rotate */
 +    dx[YY] = xi[YY] - (box[YY][YY] + box[ZZ][YY] - xj[YY]);
 +    dx[ZZ] = xi[ZZ] - (box[ZZ][ZZ]               - xj[ZZ]);
 +  }
 +  for(m=1; (m<DIM); m++) {
 +    /* The signs are taken such that we can first shift x and rotate
 +     * and then shift y and z.
 +     */
 +    if (dx[m] < -hbox[m])
 +      mj[m] = mi[m] - signi;
 +    else if (dx[m] >= hbox[m])
 +      mj[m] = mi[m] + signi;
 +    else
 +      mj[m] = mi[m];
 +  }
 +}
 +
 +static int mk_grey(FILE *log,int nnodes,egCol egc[],t_graph *g,int *AtomI,
 +                 int npbcdim,matrix box,rvec x[],int *nerror)
 +{
 +  int      m,j,ng,ai,aj,g0;
 +  rvec     dx,hbox;
 +  gmx_bool     bTriclinic;
 +  ivec     is_aj;
 +  t_pbc    pbc;
 +   
 +  for(m=0; (m<DIM); m++)
 +    hbox[m]=box[m][m]*0.5;
 +  bTriclinic = TRICLINIC(box);
 +  
 +  g0 = g->at_start;
 +  ng = 0;
 +  ai = g0 + *AtomI;
 +  
 +  /* Loop over all the bonds */
 +  for(j=0; (j<g->nedge[ai-g0]); j++) {
 +    aj = g->edge[ai-g0][j];
 +    /* If there is a white one, make it grey and set pbc */
 +    if (g->bScrewPBC)
 +      mk_1shift_screw(box,hbox,x[ai],x[aj],g->ishift[ai],is_aj);
 +    else if (bTriclinic)
 +      mk_1shift_tric(npbcdim,box,hbox,x[ai],x[aj],g->ishift[ai],is_aj);
 +    else
 +      mk_1shift(npbcdim,hbox,x[ai],x[aj],g->ishift[ai],is_aj);
 +    
 +    if (egc[aj-g0] == egcolWhite) {
 +      if (aj - g0 < *AtomI)
 +      *AtomI = aj - g0;
 +      egc[aj-g0] = egcolGrey;
 +      
 +      copy_ivec(is_aj,g->ishift[aj]);
 +
 +      ng++;
 +    }
 +    else if ((is_aj[XX] != g->ishift[aj][XX]) ||
 +           (is_aj[YY] != g->ishift[aj][YY]) ||
 +           (is_aj[ZZ] != g->ishift[aj][ZZ])) {
 +      if (gmx_debug_at) {
 +      set_pbc(&pbc,-1,box);
 +      pbc_dx(&pbc,x[ai],x[aj],dx);
 +      fprintf(debug,"mk_grey: shifts for atom %d due to atom %d\n"
 +              "are (%d,%d,%d), should be (%d,%d,%d)\n"
 +              "dx = (%g,%g,%g)\n",
 +              aj+1,ai+1,is_aj[XX],is_aj[YY],is_aj[ZZ],
 +              g->ishift[aj][XX],g->ishift[aj][YY],g->ishift[aj][ZZ],
 +              dx[XX],dx[YY],dx[ZZ]);
 +      }
 +      (*nerror)++;
 +    }
 +  }
 +  return ng;
 +}
 +
 +static int first_colour(int fC,egCol Col,t_graph *g,egCol egc[])
 +/* Return the first node with colour Col starting at fC.
 + * return -1 if none found.
 + */
 +{
 +  int i;
 +  
 +  for(i=fC; (i<g->nnodes); i++)
 +    if ((g->nedge[i] > 0) && (egc[i]==Col))
 +      return i;
 +  
 +  return -1;
 +}
 +
 +void mk_mshift(FILE *log,t_graph *g,int ePBC,matrix box,rvec x[])
 +{
 +  static int nerror_tot = 0;
 +  int    npbcdim;
 +  int    ng,nnodes,i;
 +  int    nW,nG,nB;            /* Number of Grey, Black, White */
 +  int    fW,fG;                       /* First of each category       */
 +  int    nerror=0;
 +
 +  g->bScrewPBC = (ePBC == epbcSCREW);
 +
 +  if (ePBC == epbcXY)
 +    npbcdim = 2;
 +  else
 +    npbcdim = 3;
 +
 +  GCHECK(g);
 +  /* This puts everything in the central box, that is does not move it 
 +   * at all. If we return without doing this for a system without bonds
 +   * (i.e. only settles) all water molecules are moved to the opposite octant
 +   */
-   for(j=0; j<g0; j++) {
++  for(i=g->at0; (i<g->at1); i++) {
 +      g->ishift[i][XX]=g->ishift[i][YY]=g->ishift[i][ZZ]=0;
 +  }
 +    
 +  if (!g->nbound)
 +    return;
 +
 +  nnodes=g->nnodes;
 +  if (nnodes > g->negc) {
 +    g->negc = nnodes;
 +    srenew(g->egc,g->negc);
 +  }
 +  memset(g->egc,0,(size_t)(nnodes*sizeof(g->egc[0])));
 +
 +  nW=g->nbound;
 +  nG=0;
 +  nB=0;
 +
 +  fW=0;
 +
 +  /* We even have a loop invariant:
 +   * nW+nG+nB == g->nbound
 +   */
 +#ifdef DEBUG2
 +  fprintf(log,"Starting W loop\n");
 +#endif
 +  while (nW > 0) {
 +    /* Find the first white, this will allways be a larger
 +     * number than before, because no nodes are made white
 +     * in the loop
 +     */
 +    if ((fW=first_colour(fW,egcolWhite,g,g->egc)) == -1) 
 +      gmx_fatal(FARGS,"No WHITE nodes found while nW=%d\n",nW);
 +    
 +    /* Make the first white node grey */
 +    g->egc[fW]=egcolGrey;
 +    nG++;
 +    nW--;
 +
 +    /* Initial value for the first grey */
 +    fG=fW;
 +#ifdef DEBUG2
 +    fprintf(log,"Starting G loop (nW=%d, nG=%d, nB=%d, total %d)\n",
 +          nW,nG,nB,nW+nG+nB);
 +#endif
 +    while (nG > 0) {
 +      if ((fG=first_colour(fG,egcolGrey,g,g->egc)) == -1)
 +      gmx_fatal(FARGS,"No GREY nodes found while nG=%d\n",nG);
 +      
 +      /* Make the first grey node black */
 +      g->egc[fG]=egcolBlack;
 +      nB++;
 +      nG--;
 +
 +      /* Make all the neighbours of this black node grey
 +       * and set their periodicity 
 +       */
 +      ng=mk_grey(log,nnodes,g->egc,g,&fG,npbcdim,box,x,&nerror);
 +      /* ng is the number of white nodes made grey */
 +      nG+=ng;
 +      nW-=ng;
 +    }
 +  }
 +  if (nerror > 0) {
 +    nerror_tot++;
 +    if (nerror_tot <= 100) {
 +      fprintf(stderr,"There were %d inconsistent shifts. Check your topology\n",
 +            nerror);
 +      if (log) {
 +      fprintf(log,"There were %d inconsistent shifts. Check your topology\n",
 +              nerror);
 +      }
 +    }
 +    if (nerror_tot == 100) {
 +      fprintf(stderr,"Will stop reporting inconsistent shifts\n");
 +      if (log) {
 +      fprintf(log,"Will stop reporting inconsistent shifts\n");
 +      }
 +    }
 +  }
 +}
 +
 +/************************************************************
 + *
 + *      A C T U A L   S H I F T   C O D E
 + *
 + ************************************************************/
 +
 +void shift_x(t_graph *g,matrix box,rvec x[],rvec x_s[])
 +{
 +  ivec *is;
 +  int      g0,g1;
 +  int      j,tx,ty,tz;
 +
 +  GCHECK(g);
 +  g0 = g->at_start;
 +  g1 = g->at_end;
 +  is = g->ishift;
 +  
-   for(j=g1; j<g->natoms; j++) {
++  for(j=g->at0; j<g0; j++) {
 +    copy_rvec(x[j],x_s[j]);
 +  }
 +
 +  if (g->bScrewPBC) {
 +    for(j=g0; (j<g1); j++) { 
 +      tx=is[j][XX];
 +      ty=is[j][YY];
 +      tz=is[j][ZZ];
 +      
 +      if ((tx > 0 && tx % 2 == 1) ||
 +        (tx < 0 && -tx %2 == 1)) {
 +      x_s[j][XX] = x[j][XX] + tx*box[XX][XX];
 +      x_s[j][YY] = box[YY][YY] + box[ZZ][YY] - x[j][YY];
 +      x_s[j][ZZ] = box[ZZ][ZZ]               - x[j][ZZ];
 +      } else {
 +      x_s[j][XX] = x[j][XX];
 +      }
 +      x_s[j][YY] = x[j][YY] + ty*box[YY][YY] + tz*box[ZZ][YY];
 +      x_s[j][ZZ] = x[j][ZZ] + tz*box[ZZ][ZZ];
 +    }
 +  } else if (TRICLINIC(box)) {
 +     for(j=g0; (j<g1); j++) { 
 +       tx=is[j][XX];
 +       ty=is[j][YY];
 +       tz=is[j][ZZ];
 +       
 +       x_s[j][XX]=x[j][XX]+tx*box[XX][XX]+ty*box[YY][XX]+tz*box[ZZ][XX];
 +       x_s[j][YY]=x[j][YY]+ty*box[YY][YY]+tz*box[ZZ][YY];
 +       x_s[j][ZZ]=x[j][ZZ]+tz*box[ZZ][ZZ];
 +     }
 +  } else {
 +     for(j=g0; (j<g1); j++) { 
 +       tx=is[j][XX];
 +       ty=is[j][YY];
 +       tz=is[j][ZZ];
 +       
 +       x_s[j][XX]=x[j][XX]+tx*box[XX][XX];
 +       x_s[j][YY]=x[j][YY]+ty*box[YY][YY];
 +       x_s[j][ZZ]=x[j][ZZ]+tz*box[ZZ][ZZ];
 +     }
 +  }       
 +
-   for(j=0; j<g0; j++) {
++  for(j=g1; j<g->at1; j++) {
 +    copy_rvec(x[j],x_s[j]);
 +  }
 +}
 +
 +void shift_self(t_graph *g,matrix box,rvec x[])
 +{
 +  ivec *is;
 +  int      g0,g1;
 +  int      j,tx,ty,tz;
 +
 +  if (g->bScrewPBC)
 +    gmx_incons("screw pbc not implemented for shift_self");
 +
 +  g0 = g->at_start;
 +  g1 = g->at_end;
 +  is = g->ishift;
 +
 +#ifdef DEBUG
 +  fprintf(stderr,"Shifting atoms %d to %d\n",g0,g0+gn);
 +#endif
 +  if(TRICLINIC(box)) {
 +      for(j=g0; (j<g1); j++) { 
 +        tx=is[j][XX];
 +        ty=is[j][YY];
 +        tz=is[j][ZZ];
 +        
 +        x[j][XX]=x[j][XX]+tx*box[XX][XX]+ty*box[YY][XX]+tz*box[ZZ][XX];
 +        x[j][YY]=x[j][YY]+ty*box[YY][YY]+tz*box[ZZ][YY];
 +        x[j][ZZ]=x[j][ZZ]+tz*box[ZZ][ZZ];
 +      }
 +  } else {
 +      for(j=g0; (j<g1); j++) { 
 +        tx=is[j][XX];
 +        ty=is[j][YY];
 +        tz=is[j][ZZ];
 +        
 +        x[j][XX]=x[j][XX]+tx*box[XX][XX];
 +        x[j][YY]=x[j][YY]+ty*box[YY][YY];
 +        x[j][ZZ]=x[j][ZZ]+tz*box[ZZ][ZZ];
 +      }
 +  }       
 +}
 +
 +void unshift_x(t_graph *g,matrix box,rvec x[],rvec x_s[])
 +{
 +  ivec *is;
 +  int      g0,g1;
 +  int      j,tx,ty,tz;
 +
 +  if (g->bScrewPBC)
 +    gmx_incons("screw pbc not implemented (yet) for unshift_x");
 +
 +  g0 = g->at_start;
 +  g1 = g->at_end;
 +  is = g->ishift;
 +
-   for(j=g1; j<g->natoms; j++) {
++  for(j=g->at0; j<g0; j++) {
 +    copy_rvec(x_s[j],x[j]);
 +  }
 +
 +  if(TRICLINIC(box)) {
 +      for(j=g0; (j<g1); j++) {
 +        tx=is[j][XX];
 +        ty=is[j][YY];
 +        tz=is[j][ZZ];
 +        
 +        x[j][XX]=x_s[j][XX]-tx*box[XX][XX]-ty*box[YY][XX]-tz*box[ZZ][XX];
 +        x[j][YY]=x_s[j][YY]-ty*box[YY][YY]-tz*box[ZZ][YY];
 +        x[j][ZZ]=x_s[j][ZZ]-tz*box[ZZ][ZZ];
 +      }
 +  } else {
 +      for(j=g0; (j<g1); j++) {
 +        tx=is[j][XX];
 +        ty=is[j][YY];
 +        tz=is[j][ZZ];
 +        
 +        x[j][XX]=x_s[j][XX]-tx*box[XX][XX];
 +        x[j][YY]=x_s[j][YY]-ty*box[YY][YY];
 +        x[j][ZZ]=x_s[j][ZZ]-tz*box[ZZ][ZZ];
 +      }
 +  }
 +
++  for(j=g1; j<g->at1; j++) {
 +    copy_rvec(x_s[j],x[j]);
 +  }
 +}
 +
 +void unshift_self(t_graph *g,matrix box,rvec x[])
 +{
 +  ivec *is;
 +  int g0,g1;
 +  int j,tx,ty,tz;
 +
 +  if (g->bScrewPBC)
 +    gmx_incons("screw pbc not implemented for unshift_self");
 +
 +  g0 = g->at_start;
 +  g1 = g->at_end;
 +  is = g->ishift;
 +
 +  if(TRICLINIC(box)) {
 +      for(j=g0; (j<g1); j++) {
 +        tx=is[j][XX];
 +        ty=is[j][YY];
 +        tz=is[j][ZZ];
 +        
 +        x[j][XX]=x[j][XX]-tx*box[XX][XX]-ty*box[YY][XX]-tz*box[ZZ][XX];
 +        x[j][YY]=x[j][YY]-ty*box[YY][YY]-tz*box[ZZ][YY];
 +        x[j][ZZ]=x[j][ZZ]-tz*box[ZZ][ZZ];
 +      }
 +  } else {
 +      for(j=g0; (j<g1); j++) {
 +        tx=is[j][XX];
 +        ty=is[j][YY];
 +        tz=is[j][ZZ];
 +        
 +        x[j][XX]=x[j][XX]-tx*box[XX][XX];
 +        x[j][YY]=x[j][YY]-ty*box[YY][YY];
 +        x[j][ZZ]=x[j][ZZ]-tz*box[ZZ][ZZ];
 +      }
 +  }
 +}
 +#undef GCHECK
index 2f277f714ca757e3aa189e02112625496ec93d8e,0000000000000000000000000000000000000000..b7dd00c1bec521811bfe5ecb12cde95a37e650fa
mode 100644,000000..100644
--- /dev/null
@@@ -1,824 -1,0 +1,824 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/*
 + * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2001-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the Free
 + * Software Foundation; either version 2 of the License, or (at your option) any
 + * later version.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website.
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecCSTab_VdwCSTab_GeomP1P1_VF_avx_256_double
 + * Electrostatics interaction: CubicSplineTable
 + * VdW interaction:            CubicSplineTable
 + * Geometry:                   Particle-Particle
 + * Calculate force/pot:        PotentialAndForce
 + */
 +void
 +nb_kernel_ElecCSTab_VdwCSTab_GeomP1P1_VF_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m128i          vfitab;
 +    __m128i          ifour       = _mm_set1_epi32(4);
 +    __m256d          rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
 +    real             *vftab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    vftab            = kernel_data->table_elec_vdw->data;
 +    vftabscale       = _mm256_set1_pd(kernel_data->table_elec_vdw->scale);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_1rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +
 +        /* Load parameters for i particles */
 +        iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +        vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +        /* Reset potential sums */
 +        velecsum         = _mm256_setzero_pd();
 +        vvdwsum          = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq00,FF),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            /* CUBIC SPLINE TABLE DISPERSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw6            = _mm256_mul_pd(c6_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw6            = _mm256_mul_pd(c6_00,FF);
 +
 +            /* CUBIC SPLINE TABLE REPULSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw12           = _mm256_mul_pd(c12_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw12           = _mm256_mul_pd(c12_00,FF);
 +            vvdw             = _mm256_add_pd(vvdw12,vvdw6);
 +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
 +
 +            /* Inner loop uses 73 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq00,FF),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            /* CUBIC SPLINE TABLE DISPERSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw6            = _mm256_mul_pd(c6_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw6            = _mm256_mul_pd(c6_00,FF);
 +
 +            /* CUBIC SPLINE TABLE REPULSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw12           = _mm256_mul_pd(c12_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw12           = _mm256_mul_pd(c12_00,FF);
 +            vvdw             = _mm256_add_pd(vvdw12,vvdw6);
 +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +            vvdw             = _mm256_andnot_pd(dummy_mask,vvdw);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
 +
 +            /* Inner loop uses 74 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_1atom_swizzle_pd(fix0,fiy0,fiz0,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
 +        gmx_mm256_update_1pot_pd(vvdwsum,kernel_data->energygrp_vdw+ggid);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 9 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_VF,outeriter*9 + inneriter*74);
 +}
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecCSTab_VdwCSTab_GeomP1P1_F_avx_256_double
 + * Electrostatics interaction: CubicSplineTable
 + * VdW interaction:            CubicSplineTable
 + * Geometry:                   Particle-Particle
 + * Calculate force/pot:        Force
 + */
 +void
 +nb_kernel_ElecCSTab_VdwCSTab_GeomP1P1_F_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m128i          vfitab;
 +    __m128i          ifour       = _mm_set1_epi32(4);
 +    __m256d          rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
 +    real             *vftab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    vftab            = kernel_data->table_elec_vdw->data;
 +    vftabscale       = _mm256_set1_pd(kernel_data->table_elec_vdw->scale);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_1rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +
 +        /* Load parameters for i particles */
 +        iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +        vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq00,FF),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            /* CUBIC SPLINE TABLE DISPERSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw6            = _mm256_mul_pd(c6_00,FF);
 +
 +            /* CUBIC SPLINE TABLE REPULSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw12           = _mm256_mul_pd(c12_00,FF);
 +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
 +
 +            /* Inner loop uses 61 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq00,FF),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            /* CUBIC SPLINE TABLE DISPERSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw6            = _mm256_mul_pd(c6_00,FF);
 +
 +            /* CUBIC SPLINE TABLE REPULSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw12           = _mm256_mul_pd(c12_00,FF);
 +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
 +
 +            /* Inner loop uses 62 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_1atom_swizzle_pd(fix0,fiy0,fiz0,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 7 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_F,outeriter*7 + inneriter*62);
 +}
index f8ca813efd47ab603f9439bc7749caf043145b39,0000000000000000000000000000000000000000..a17e85869b34918dc4b7e2877e84a7a6b0fb45a0
mode 100644,000000..100644
--- /dev/null
@@@ -1,1302 -1,0 +1,1302 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/*
 + * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2001-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the Free
 + * Software Foundation; either version 2 of the License, or (at your option) any
 + * later version.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website.
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecCSTab_VdwCSTab_GeomW3P1_VF_avx_256_double
 + * Electrostatics interaction: CubicSplineTable
 + * VdW interaction:            CubicSplineTable
 + * Geometry:                   Water3-Particle
 + * Calculate force/pot:        PotentialAndForce
 + */
 +void
 +nb_kernel_ElecCSTab_VdwCSTab_GeomW3P1_VF_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m128i          vfitab;
 +    __m128i          ifour       = _mm_set1_epi32(4);
 +    __m256d          rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
 +    real             *vftab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    vftab            = kernel_data->table_elec_vdw->data;
 +    vftabscale       = _mm256_set1_pd(kernel_data->table_elec_vdw->scale);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +
 +        /* Reset potential sums */
 +        velecsum         = _mm256_setzero_pd();
 +        vvdwsum          = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq00,FF),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            /* CUBIC SPLINE TABLE DISPERSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw6            = _mm256_mul_pd(c6_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw6            = _mm256_mul_pd(c6_00,FF);
 +
 +            /* CUBIC SPLINE TABLE REPULSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw12           = _mm256_mul_pd(c12_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw12           = _mm256_mul_pd(c12_00,FF);
 +            vvdw             = _mm256_add_pd(vvdw12,vvdw6);
 +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r10,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq10,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq10,FF),_mm256_mul_pd(vftabscale,rinv10)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r20,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq20,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq20,FF),_mm256_mul_pd(vftabscale,rinv20)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 162 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq00,FF),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            /* CUBIC SPLINE TABLE DISPERSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw6            = _mm256_mul_pd(c6_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw6            = _mm256_mul_pd(c6_00,FF);
 +
 +            /* CUBIC SPLINE TABLE REPULSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw12           = _mm256_mul_pd(c12_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw12           = _mm256_mul_pd(c12_00,FF);
 +            vvdw             = _mm256_add_pd(vvdw12,vvdw6);
 +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +            vvdw             = _mm256_andnot_pd(dummy_mask,vvdw);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +            r10              = _mm256_andnot_pd(dummy_mask,r10);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r10,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq10,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq10,FF),_mm256_mul_pd(vftabscale,rinv10)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +            r20              = _mm256_andnot_pd(dummy_mask,r20);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r20,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq20,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq20,FF),_mm256_mul_pd(vftabscale,rinv20)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 165 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
 +        gmx_mm256_update_1pot_pd(vvdwsum,kernel_data->energygrp_vdw+ggid);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 20 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3_VF,outeriter*20 + inneriter*165);
 +}
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecCSTab_VdwCSTab_GeomW3P1_F_avx_256_double
 + * Electrostatics interaction: CubicSplineTable
 + * VdW interaction:            CubicSplineTable
 + * Geometry:                   Water3-Particle
 + * Calculate force/pot:        Force
 + */
 +void
 +nb_kernel_ElecCSTab_VdwCSTab_GeomW3P1_F_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m128i          vfitab;
 +    __m128i          ifour       = _mm_set1_epi32(4);
 +    __m256d          rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
 +    real             *vftab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    vftab            = kernel_data->table_elec_vdw->data;
 +    vftabscale       = _mm256_set1_pd(kernel_data->table_elec_vdw->scale);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq00,FF),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            /* CUBIC SPLINE TABLE DISPERSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw6            = _mm256_mul_pd(c6_00,FF);
 +
 +            /* CUBIC SPLINE TABLE REPULSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw12           = _mm256_mul_pd(c12_00,FF);
 +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r10,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq10,FF),_mm256_mul_pd(vftabscale,rinv10)));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r20,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq20,FF),_mm256_mul_pd(vftabscale,rinv20)));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 142 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq00,FF),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            /* CUBIC SPLINE TABLE DISPERSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw6            = _mm256_mul_pd(c6_00,FF);
 +
 +            /* CUBIC SPLINE TABLE REPULSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw12           = _mm256_mul_pd(c12_00,FF);
 +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +            r10              = _mm256_andnot_pd(dummy_mask,r10);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r10,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq10,FF),_mm256_mul_pd(vftabscale,rinv10)));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +            r20              = _mm256_andnot_pd(dummy_mask,r20);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r20,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq20,FF),_mm256_mul_pd(vftabscale,rinv20)));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 145 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 18 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3_F,outeriter*18 + inneriter*145);
 +}
index a390f709b266e16ffe651c8443889dea9894eccb,0000000000000000000000000000000000000000..d3b59098411455b992f4d52a5b4e67a19b43077f
mode 100644,000000..100644
--- /dev/null
@@@ -1,2452 -1,0 +1,2452 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/*
 + * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2001-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the Free
 + * Software Foundation; either version 2 of the License, or (at your option) any
 + * later version.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website.
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecCSTab_VdwCSTab_GeomW3W3_VF_avx_256_double
 + * Electrostatics interaction: CubicSplineTable
 + * VdW interaction:            CubicSplineTable
 + * Geometry:                   Water3-Water3
 + * Calculate force/pot:        PotentialAndForce
 + */
 +void
 +nb_kernel_ElecCSTab_VdwCSTab_GeomW3W3_VF_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
 +    __m256d          jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
 +    int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
 +    __m256d          jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
 +    __m256d          dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
 +    __m256d          dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
 +    __m256d          dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m128i          vfitab;
 +    __m128i          ifour       = _mm_set1_epi32(4);
 +    __m256d          rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
 +    real             *vftab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    vftab            = kernel_data->table_elec_vdw->data;
 +    vftabscale       = _mm256_set1_pd(kernel_data->table_elec_vdw->scale);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +    jq0              = _mm256_set1_pd(charge[inr+0]);
 +    jq1              = _mm256_set1_pd(charge[inr+1]);
 +    jq2              = _mm256_set1_pd(charge[inr+2]);
 +    vdwjidx0A        = 2*vdwtype[inr+0];
 +    qq00             = _mm256_mul_pd(iq0,jq0);
 +    c6_00            = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A]);
 +    c12_00           = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A+1]);
 +    qq01             = _mm256_mul_pd(iq0,jq1);
 +    qq02             = _mm256_mul_pd(iq0,jq2);
 +    qq10             = _mm256_mul_pd(iq1,jq0);
 +    qq11             = _mm256_mul_pd(iq1,jq1);
 +    qq12             = _mm256_mul_pd(iq1,jq2);
 +    qq20             = _mm256_mul_pd(iq2,jq0);
 +    qq21             = _mm256_mul_pd(iq2,jq1);
 +    qq22             = _mm256_mul_pd(iq2,jq2);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +
 +        /* Reset potential sums */
 +        velecsum         = _mm256_setzero_pd();
 +        vvdwsum          = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                              &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx01             = _mm256_sub_pd(ix0,jx1);
 +            dy01             = _mm256_sub_pd(iy0,jy1);
 +            dz01             = _mm256_sub_pd(iz0,jz1);
 +            dx02             = _mm256_sub_pd(ix0,jx2);
 +            dy02             = _mm256_sub_pd(iy0,jy2);
 +            dz02             = _mm256_sub_pd(iz0,jz2);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
 +            rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv01           = gmx_mm256_invsqrt_pd(rsq01);
 +            rinv02           = gmx_mm256_invsqrt_pd(rsq02);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq00,FF),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            /* CUBIC SPLINE TABLE DISPERSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw6            = _mm256_mul_pd(c6_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw6            = _mm256_mul_pd(c6_00,FF);
 +
 +            /* CUBIC SPLINE TABLE REPULSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw12           = _mm256_mul_pd(c12_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw12           = _mm256_mul_pd(c12_00,FF);
 +            vvdw             = _mm256_add_pd(vvdw12,vvdw6);
 +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r01              = _mm256_mul_pd(rsq01,rinv01);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r01,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq01,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq01,FF),_mm256_mul_pd(vftabscale,rinv01)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx01);
 +            ty               = _mm256_mul_pd(fscal,dy01);
 +            tz               = _mm256_mul_pd(fscal,dz01);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r02              = _mm256_mul_pd(rsq02,rinv02);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r02,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq02,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq02,FF),_mm256_mul_pd(vftabscale,rinv02)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx02);
 +            ty               = _mm256_mul_pd(fscal,dy02);
 +            tz               = _mm256_mul_pd(fscal,dz02);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r10,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq10,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq10,FF),_mm256_mul_pd(vftabscale,rinv10)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r11              = _mm256_mul_pd(rsq11,rinv11);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r11,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq11,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq11,FF),_mm256_mul_pd(vftabscale,rinv11)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r12              = _mm256_mul_pd(rsq12,rinv12);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r12,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq12,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq12,FF),_mm256_mul_pd(vftabscale,rinv12)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r20,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq20,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq20,FF),_mm256_mul_pd(vftabscale,rinv20)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r21              = _mm256_mul_pd(rsq21,rinv21);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r21,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq21,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq21,FF),_mm256_mul_pd(vftabscale,rinv21)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r22              = _mm256_mul_pd(rsq22,rinv22);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r22,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq22,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq22,FF),_mm256_mul_pd(vftabscale,rinv22)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
 +
 +            /* Inner loop uses 417 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                              &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx01             = _mm256_sub_pd(ix0,jx1);
 +            dy01             = _mm256_sub_pd(iy0,jy1);
 +            dz01             = _mm256_sub_pd(iz0,jz1);
 +            dx02             = _mm256_sub_pd(ix0,jx2);
 +            dy02             = _mm256_sub_pd(iy0,jy2);
 +            dz02             = _mm256_sub_pd(iz0,jz2);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
 +            rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv01           = gmx_mm256_invsqrt_pd(rsq01);
 +            rinv02           = gmx_mm256_invsqrt_pd(rsq02);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq00,FF),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            /* CUBIC SPLINE TABLE DISPERSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw6            = _mm256_mul_pd(c6_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw6            = _mm256_mul_pd(c6_00,FF);
 +
 +            /* CUBIC SPLINE TABLE REPULSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw12           = _mm256_mul_pd(c12_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw12           = _mm256_mul_pd(c12_00,FF);
 +            vvdw             = _mm256_add_pd(vvdw12,vvdw6);
 +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +            vvdw             = _mm256_andnot_pd(dummy_mask,vvdw);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r01              = _mm256_mul_pd(rsq01,rinv01);
 +            r01              = _mm256_andnot_pd(dummy_mask,r01);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r01,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq01,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq01,FF),_mm256_mul_pd(vftabscale,rinv01)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx01);
 +            ty               = _mm256_mul_pd(fscal,dy01);
 +            tz               = _mm256_mul_pd(fscal,dz01);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r02              = _mm256_mul_pd(rsq02,rinv02);
 +            r02              = _mm256_andnot_pd(dummy_mask,r02);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r02,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq02,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq02,FF),_mm256_mul_pd(vftabscale,rinv02)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx02);
 +            ty               = _mm256_mul_pd(fscal,dy02);
 +            tz               = _mm256_mul_pd(fscal,dz02);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +            r10              = _mm256_andnot_pd(dummy_mask,r10);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r10,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq10,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq10,FF),_mm256_mul_pd(vftabscale,rinv10)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r11              = _mm256_mul_pd(rsq11,rinv11);
 +            r11              = _mm256_andnot_pd(dummy_mask,r11);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r11,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq11,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq11,FF),_mm256_mul_pd(vftabscale,rinv11)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r12              = _mm256_mul_pd(rsq12,rinv12);
 +            r12              = _mm256_andnot_pd(dummy_mask,r12);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r12,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq12,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq12,FF),_mm256_mul_pd(vftabscale,rinv12)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +            r20              = _mm256_andnot_pd(dummy_mask,r20);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r20,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq20,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq20,FF),_mm256_mul_pd(vftabscale,rinv20)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r21              = _mm256_mul_pd(rsq21,rinv21);
 +            r21              = _mm256_andnot_pd(dummy_mask,r21);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r21,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq21,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq21,FF),_mm256_mul_pd(vftabscale,rinv21)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r22              = _mm256_mul_pd(rsq22,rinv22);
 +            r22              = _mm256_andnot_pd(dummy_mask,r22);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r22,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq22,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq22,FF),_mm256_mul_pd(vftabscale,rinv22)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
 +
 +            /* Inner loop uses 426 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
 +        gmx_mm256_update_1pot_pd(vvdwsum,kernel_data->energygrp_vdw+ggid);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 20 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3W3_VF,outeriter*20 + inneriter*426);
 +}
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecCSTab_VdwCSTab_GeomW3W3_F_avx_256_double
 + * Electrostatics interaction: CubicSplineTable
 + * VdW interaction:            CubicSplineTable
 + * Geometry:                   Water3-Water3
 + * Calculate force/pot:        Force
 + */
 +void
 +nb_kernel_ElecCSTab_VdwCSTab_GeomW3W3_F_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
 +    __m256d          jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
 +    int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
 +    __m256d          jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
 +    __m256d          dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
 +    __m256d          dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
 +    __m256d          dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m128i          vfitab;
 +    __m128i          ifour       = _mm_set1_epi32(4);
 +    __m256d          rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
 +    real             *vftab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    vftab            = kernel_data->table_elec_vdw->data;
 +    vftabscale       = _mm256_set1_pd(kernel_data->table_elec_vdw->scale);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +    jq0              = _mm256_set1_pd(charge[inr+0]);
 +    jq1              = _mm256_set1_pd(charge[inr+1]);
 +    jq2              = _mm256_set1_pd(charge[inr+2]);
 +    vdwjidx0A        = 2*vdwtype[inr+0];
 +    qq00             = _mm256_mul_pd(iq0,jq0);
 +    c6_00            = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A]);
 +    c12_00           = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A+1]);
 +    qq01             = _mm256_mul_pd(iq0,jq1);
 +    qq02             = _mm256_mul_pd(iq0,jq2);
 +    qq10             = _mm256_mul_pd(iq1,jq0);
 +    qq11             = _mm256_mul_pd(iq1,jq1);
 +    qq12             = _mm256_mul_pd(iq1,jq2);
 +    qq20             = _mm256_mul_pd(iq2,jq0);
 +    qq21             = _mm256_mul_pd(iq2,jq1);
 +    qq22             = _mm256_mul_pd(iq2,jq2);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                              &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx01             = _mm256_sub_pd(ix0,jx1);
 +            dy01             = _mm256_sub_pd(iy0,jy1);
 +            dz01             = _mm256_sub_pd(iz0,jz1);
 +            dx02             = _mm256_sub_pd(ix0,jx2);
 +            dy02             = _mm256_sub_pd(iy0,jy2);
 +            dz02             = _mm256_sub_pd(iz0,jz2);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
 +            rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv01           = gmx_mm256_invsqrt_pd(rsq01);
 +            rinv02           = gmx_mm256_invsqrt_pd(rsq02);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq00,FF),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            /* CUBIC SPLINE TABLE DISPERSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw6            = _mm256_mul_pd(c6_00,FF);
 +
 +            /* CUBIC SPLINE TABLE REPULSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw12           = _mm256_mul_pd(c12_00,FF);
 +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r01              = _mm256_mul_pd(rsq01,rinv01);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r01,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq01,FF),_mm256_mul_pd(vftabscale,rinv01)));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx01);
 +            ty               = _mm256_mul_pd(fscal,dy01);
 +            tz               = _mm256_mul_pd(fscal,dz01);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r02              = _mm256_mul_pd(rsq02,rinv02);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r02,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq02,FF),_mm256_mul_pd(vftabscale,rinv02)));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx02);
 +            ty               = _mm256_mul_pd(fscal,dy02);
 +            tz               = _mm256_mul_pd(fscal,dz02);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r10,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq10,FF),_mm256_mul_pd(vftabscale,rinv10)));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r11              = _mm256_mul_pd(rsq11,rinv11);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r11,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq11,FF),_mm256_mul_pd(vftabscale,rinv11)));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r12              = _mm256_mul_pd(rsq12,rinv12);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r12,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq12,FF),_mm256_mul_pd(vftabscale,rinv12)));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r20,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq20,FF),_mm256_mul_pd(vftabscale,rinv20)));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r21              = _mm256_mul_pd(rsq21,rinv21);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r21,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq21,FF),_mm256_mul_pd(vftabscale,rinv21)));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r22              = _mm256_mul_pd(rsq22,rinv22);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r22,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq22,FF),_mm256_mul_pd(vftabscale,rinv22)));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
 +
 +            /* Inner loop uses 373 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                              &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx01             = _mm256_sub_pd(ix0,jx1);
 +            dy01             = _mm256_sub_pd(iy0,jy1);
 +            dz01             = _mm256_sub_pd(iz0,jz1);
 +            dx02             = _mm256_sub_pd(ix0,jx2);
 +            dy02             = _mm256_sub_pd(iy0,jy2);
 +            dz02             = _mm256_sub_pd(iz0,jz2);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
 +            rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv01           = gmx_mm256_invsqrt_pd(rsq01);
 +            rinv02           = gmx_mm256_invsqrt_pd(rsq02);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq00,FF),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            /* CUBIC SPLINE TABLE DISPERSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw6            = _mm256_mul_pd(c6_00,FF);
 +
 +            /* CUBIC SPLINE TABLE REPULSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw12           = _mm256_mul_pd(c12_00,FF);
 +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r01              = _mm256_mul_pd(rsq01,rinv01);
 +            r01              = _mm256_andnot_pd(dummy_mask,r01);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r01,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq01,FF),_mm256_mul_pd(vftabscale,rinv01)));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx01);
 +            ty               = _mm256_mul_pd(fscal,dy01);
 +            tz               = _mm256_mul_pd(fscal,dz01);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r02              = _mm256_mul_pd(rsq02,rinv02);
 +            r02              = _mm256_andnot_pd(dummy_mask,r02);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r02,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq02,FF),_mm256_mul_pd(vftabscale,rinv02)));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx02);
 +            ty               = _mm256_mul_pd(fscal,dy02);
 +            tz               = _mm256_mul_pd(fscal,dz02);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +            r10              = _mm256_andnot_pd(dummy_mask,r10);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r10,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq10,FF),_mm256_mul_pd(vftabscale,rinv10)));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r11              = _mm256_mul_pd(rsq11,rinv11);
 +            r11              = _mm256_andnot_pd(dummy_mask,r11);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r11,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq11,FF),_mm256_mul_pd(vftabscale,rinv11)));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r12              = _mm256_mul_pd(rsq12,rinv12);
 +            r12              = _mm256_andnot_pd(dummy_mask,r12);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r12,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq12,FF),_mm256_mul_pd(vftabscale,rinv12)));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +            r20              = _mm256_andnot_pd(dummy_mask,r20);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r20,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq20,FF),_mm256_mul_pd(vftabscale,rinv20)));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r21              = _mm256_mul_pd(rsq21,rinv21);
 +            r21              = _mm256_andnot_pd(dummy_mask,r21);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r21,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq21,FF),_mm256_mul_pd(vftabscale,rinv21)));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r22              = _mm256_mul_pd(rsq22,rinv22);
 +            r22              = _mm256_andnot_pd(dummy_mask,r22);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r22,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq22,FF),_mm256_mul_pd(vftabscale,rinv22)));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
 +
 +            /* Inner loop uses 382 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 18 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3W3_F,outeriter*18 + inneriter*382);
 +}
index f7ff5591fc27260cc845f713ce166431c05618d0,0000000000000000000000000000000000000000..1dc792a0dd0f67b598bd8e127140bd1af03f39a0
mode 100644,000000..100644
--- /dev/null
@@@ -1,1464 -1,0 +1,1464 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/*
 + * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2001-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the Free
 + * Software Foundation; either version 2 of the License, or (at your option) any
 + * later version.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website.
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecCSTab_VdwCSTab_GeomW4P1_VF_avx_256_double
 + * Electrostatics interaction: CubicSplineTable
 + * VdW interaction:            CubicSplineTable
 + * Geometry:                   Water4-Particle
 + * Calculate force/pot:        PotentialAndForce
 + */
 +void
 +nb_kernel_ElecCSTab_VdwCSTab_GeomW4P1_VF_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    real *           vdwioffsetptr3;
 +    __m256d          ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          dx30,dy30,dz30,rsq30,rinv30,rinvsq30,r30,qq30,c6_30,c12_30;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m128i          vfitab;
 +    __m128i          ifour       = _mm_set1_epi32(4);
 +    __m256d          rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
 +    real             *vftab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    vftab            = kernel_data->table_elec_vdw->data;
 +    vftabscale       = _mm256_set1_pd(kernel_data->table_elec_vdw->scale);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    iq3              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+3]));
 +    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_4rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +        fix3             = _mm256_setzero_pd();
 +        fiy3             = _mm256_setzero_pd();
 +        fiz3             = _mm256_setzero_pd();
 +
 +        /* Reset potential sums */
 +        velecsum         = _mm256_setzero_pd();
 +        vvdwsum          = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx30             = _mm256_sub_pd(ix3,jx0);
 +            dy30             = _mm256_sub_pd(iy3,jy0);
 +            dz30             = _mm256_sub_pd(iz3,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq30            = gmx_mm256_calc_rsq_pd(dx30,dy30,dz30);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv30           = gmx_mm256_invsqrt_pd(rsq30);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE DISPERSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw6            = _mm256_mul_pd(c6_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw6            = _mm256_mul_pd(c6_00,FF);
 +
 +            /* CUBIC SPLINE TABLE REPULSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw12           = _mm256_mul_pd(c12_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw12           = _mm256_mul_pd(c12_00,FF);
 +            vvdw             = _mm256_add_pd(vvdw12,vvdw6);
 +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = fvdw;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r10,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq10,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq10,FF),_mm256_mul_pd(vftabscale,rinv10)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r20,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq20,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq20,FF),_mm256_mul_pd(vftabscale,rinv20)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r30              = _mm256_mul_pd(rsq30,rinv30);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq30             = _mm256_mul_pd(iq3,jq0);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r30,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq30,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq30,FF),_mm256_mul_pd(vftabscale,rinv30)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx30);
 +            ty               = _mm256_mul_pd(fscal,dy30);
 +            tz               = _mm256_mul_pd(fscal,dz30);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 188 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx30             = _mm256_sub_pd(ix3,jx0);
 +            dy30             = _mm256_sub_pd(iy3,jy0);
 +            dz30             = _mm256_sub_pd(iz3,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq30            = gmx_mm256_calc_rsq_pd(dx30,dy30,dz30);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv30           = gmx_mm256_invsqrt_pd(rsq30);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE DISPERSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw6            = _mm256_mul_pd(c6_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw6            = _mm256_mul_pd(c6_00,FF);
 +
 +            /* CUBIC SPLINE TABLE REPULSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw12           = _mm256_mul_pd(c12_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw12           = _mm256_mul_pd(c12_00,FF);
 +            vvdw             = _mm256_add_pd(vvdw12,vvdw6);
 +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            vvdw             = _mm256_andnot_pd(dummy_mask,vvdw);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = fvdw;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +            r10              = _mm256_andnot_pd(dummy_mask,r10);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r10,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq10,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq10,FF),_mm256_mul_pd(vftabscale,rinv10)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +            r20              = _mm256_andnot_pd(dummy_mask,r20);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r20,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq20,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq20,FF),_mm256_mul_pd(vftabscale,rinv20)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r30              = _mm256_mul_pd(rsq30,rinv30);
 +            r30              = _mm256_andnot_pd(dummy_mask,r30);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq30             = _mm256_mul_pd(iq3,jq0);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r30,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq30,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq30,FF),_mm256_mul_pd(vftabscale,rinv30)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx30);
 +            ty               = _mm256_mul_pd(fscal,dy30);
 +            tz               = _mm256_mul_pd(fscal,dz30);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 192 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_4atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
 +        gmx_mm256_update_1pot_pd(vvdwsum,kernel_data->energygrp_vdw+ggid);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 26 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4_VF,outeriter*26 + inneriter*192);
 +}
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecCSTab_VdwCSTab_GeomW4P1_F_avx_256_double
 + * Electrostatics interaction: CubicSplineTable
 + * VdW interaction:            CubicSplineTable
 + * Geometry:                   Water4-Particle
 + * Calculate force/pot:        Force
 + */
 +void
 +nb_kernel_ElecCSTab_VdwCSTab_GeomW4P1_F_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    real *           vdwioffsetptr3;
 +    __m256d          ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          dx30,dy30,dz30,rsq30,rinv30,rinvsq30,r30,qq30,c6_30,c12_30;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m128i          vfitab;
 +    __m128i          ifour       = _mm_set1_epi32(4);
 +    __m256d          rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
 +    real             *vftab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    vftab            = kernel_data->table_elec_vdw->data;
 +    vftabscale       = _mm256_set1_pd(kernel_data->table_elec_vdw->scale);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    iq3              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+3]));
 +    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_4rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +        fix3             = _mm256_setzero_pd();
 +        fiy3             = _mm256_setzero_pd();
 +        fiz3             = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx30             = _mm256_sub_pd(ix3,jx0);
 +            dy30             = _mm256_sub_pd(iy3,jy0);
 +            dz30             = _mm256_sub_pd(iz3,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq30            = gmx_mm256_calc_rsq_pd(dx30,dy30,dz30);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv30           = gmx_mm256_invsqrt_pd(rsq30);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE DISPERSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw6            = _mm256_mul_pd(c6_00,FF);
 +
 +            /* CUBIC SPLINE TABLE REPULSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw12           = _mm256_mul_pd(c12_00,FF);
 +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            fscal            = fvdw;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r10,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq10,FF),_mm256_mul_pd(vftabscale,rinv10)));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r20,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq20,FF),_mm256_mul_pd(vftabscale,rinv20)));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r30              = _mm256_mul_pd(rsq30,rinv30);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq30             = _mm256_mul_pd(iq3,jq0);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r30,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq30,FF),_mm256_mul_pd(vftabscale,rinv30)));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx30);
 +            ty               = _mm256_mul_pd(fscal,dy30);
 +            tz               = _mm256_mul_pd(fscal,dz30);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 168 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx30             = _mm256_sub_pd(ix3,jx0);
 +            dy30             = _mm256_sub_pd(iy3,jy0);
 +            dz30             = _mm256_sub_pd(iz3,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq30            = gmx_mm256_calc_rsq_pd(dx30,dy30,dz30);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv30           = gmx_mm256_invsqrt_pd(rsq30);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE DISPERSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw6            = _mm256_mul_pd(c6_00,FF);
 +
 +            /* CUBIC SPLINE TABLE REPULSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw12           = _mm256_mul_pd(c12_00,FF);
 +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            fscal            = fvdw;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +            r10              = _mm256_andnot_pd(dummy_mask,r10);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r10,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq10,FF),_mm256_mul_pd(vftabscale,rinv10)));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +            r20              = _mm256_andnot_pd(dummy_mask,r20);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r20,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq20,FF),_mm256_mul_pd(vftabscale,rinv20)));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r30              = _mm256_mul_pd(rsq30,rinv30);
 +            r30              = _mm256_andnot_pd(dummy_mask,r30);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq30             = _mm256_mul_pd(iq3,jq0);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r30,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq30,FF),_mm256_mul_pd(vftabscale,rinv30)));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx30);
 +            ty               = _mm256_mul_pd(fscal,dy30);
 +            tz               = _mm256_mul_pd(fscal,dz30);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 172 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_4atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 24 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4_F,outeriter*24 + inneriter*172);
 +}
index 71cbe5c1da9a359df50c559ad0ec6cbe49472c15,0000000000000000000000000000000000000000..6cd3f6eba2fe1caa3e2d148dde2d235a6d41e8a5
mode 100644,000000..100644
--- /dev/null
@@@ -1,2630 -1,0 +1,2630 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/*
 + * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2001-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the Free
 + * Software Foundation; either version 2 of the License, or (at your option) any
 + * later version.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website.
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecCSTab_VdwCSTab_GeomW4W4_VF_avx_256_double
 + * Electrostatics interaction: CubicSplineTable
 + * VdW interaction:            CubicSplineTable
 + * Geometry:                   Water4-Water4
 + * Calculate force/pot:        PotentialAndForce
 + */
 +void
 +nb_kernel_ElecCSTab_VdwCSTab_GeomW4W4_VF_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    real *           vdwioffsetptr3;
 +    __m256d          ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
 +    __m256d          jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
 +    int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
 +    __m256d          jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
 +    int              vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D;
 +    __m256d          jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
 +    __m256d          dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
 +    __m256d          dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
 +    __m256d          dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
 +    __m256d          dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
 +    __m256d          dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
 +    __m256d          dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
 +    __m256d          dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
 +    __m256d          dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m128i          vfitab;
 +    __m128i          ifour       = _mm_set1_epi32(4);
 +    __m256d          rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
 +    real             *vftab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    vftab            = kernel_data->table_elec_vdw->data;
 +    vftabscale       = _mm256_set1_pd(kernel_data->table_elec_vdw->scale);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    iq3              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+3]));
 +    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +    jq1              = _mm256_set1_pd(charge[inr+1]);
 +    jq2              = _mm256_set1_pd(charge[inr+2]);
 +    jq3              = _mm256_set1_pd(charge[inr+3]);
 +    vdwjidx0A        = 2*vdwtype[inr+0];
 +    c6_00            = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A]);
 +    c12_00           = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A+1]);
 +    qq11             = _mm256_mul_pd(iq1,jq1);
 +    qq12             = _mm256_mul_pd(iq1,jq2);
 +    qq13             = _mm256_mul_pd(iq1,jq3);
 +    qq21             = _mm256_mul_pd(iq2,jq1);
 +    qq22             = _mm256_mul_pd(iq2,jq2);
 +    qq23             = _mm256_mul_pd(iq2,jq3);
 +    qq31             = _mm256_mul_pd(iq3,jq1);
 +    qq32             = _mm256_mul_pd(iq3,jq2);
 +    qq33             = _mm256_mul_pd(iq3,jq3);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_4rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +        fix3             = _mm256_setzero_pd();
 +        fiy3             = _mm256_setzero_pd();
 +        fiz3             = _mm256_setzero_pd();
 +
 +        /* Reset potential sums */
 +        velecsum         = _mm256_setzero_pd();
 +        vvdwsum          = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_4rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
 +                                                 &jy2,&jz2,&jx3,&jy3,&jz3);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx13             = _mm256_sub_pd(ix1,jx3);
 +            dy13             = _mm256_sub_pd(iy1,jy3);
 +            dz13             = _mm256_sub_pd(iz1,jz3);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +            dx23             = _mm256_sub_pd(ix2,jx3);
 +            dy23             = _mm256_sub_pd(iy2,jy3);
 +            dz23             = _mm256_sub_pd(iz2,jz3);
 +            dx31             = _mm256_sub_pd(ix3,jx1);
 +            dy31             = _mm256_sub_pd(iy3,jy1);
 +            dz31             = _mm256_sub_pd(iz3,jz1);
 +            dx32             = _mm256_sub_pd(ix3,jx2);
 +            dy32             = _mm256_sub_pd(iy3,jy2);
 +            dz32             = _mm256_sub_pd(iz3,jz2);
 +            dx33             = _mm256_sub_pd(ix3,jx3);
 +            dy33             = _mm256_sub_pd(iy3,jy3);
 +            dz33             = _mm256_sub_pd(iz3,jz3);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq13            = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +            rsq23            = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
 +            rsq31            = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
 +            rsq32            = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
 +            rsq33            = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv13           = gmx_mm256_invsqrt_pd(rsq13);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +            rinv23           = gmx_mm256_invsqrt_pd(rsq23);
 +            rinv31           = gmx_mm256_invsqrt_pd(rsq31);
 +            rinv32           = gmx_mm256_invsqrt_pd(rsq32);
 +            rinv33           = gmx_mm256_invsqrt_pd(rsq33);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +            fjx3             = _mm256_setzero_pd();
 +            fjy3             = _mm256_setzero_pd();
 +            fjz3             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE DISPERSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw6            = _mm256_mul_pd(c6_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw6            = _mm256_mul_pd(c6_00,FF);
 +
 +            /* CUBIC SPLINE TABLE REPULSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw12           = _mm256_mul_pd(c12_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw12           = _mm256_mul_pd(c12_00,FF);
 +            vvdw             = _mm256_add_pd(vvdw12,vvdw6);
 +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = fvdw;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r11              = _mm256_mul_pd(rsq11,rinv11);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r11,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq11,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq11,FF),_mm256_mul_pd(vftabscale,rinv11)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r12              = _mm256_mul_pd(rsq12,rinv12);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r12,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq12,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq12,FF),_mm256_mul_pd(vftabscale,rinv12)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r13              = _mm256_mul_pd(rsq13,rinv13);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r13,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq13,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq13,FF),_mm256_mul_pd(vftabscale,rinv13)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx13);
 +            ty               = _mm256_mul_pd(fscal,dy13);
 +            tz               = _mm256_mul_pd(fscal,dz13);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r21              = _mm256_mul_pd(rsq21,rinv21);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r21,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq21,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq21,FF),_mm256_mul_pd(vftabscale,rinv21)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r22              = _mm256_mul_pd(rsq22,rinv22);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r22,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq22,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq22,FF),_mm256_mul_pd(vftabscale,rinv22)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r23              = _mm256_mul_pd(rsq23,rinv23);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r23,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq23,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq23,FF),_mm256_mul_pd(vftabscale,rinv23)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx23);
 +            ty               = _mm256_mul_pd(fscal,dy23);
 +            tz               = _mm256_mul_pd(fscal,dz23);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r31              = _mm256_mul_pd(rsq31,rinv31);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r31,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq31,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq31,FF),_mm256_mul_pd(vftabscale,rinv31)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx31);
 +            ty               = _mm256_mul_pd(fscal,dy31);
 +            tz               = _mm256_mul_pd(fscal,dz31);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r32              = _mm256_mul_pd(rsq32,rinv32);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r32,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq32,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq32,FF),_mm256_mul_pd(vftabscale,rinv32)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx32);
 +            ty               = _mm256_mul_pd(fscal,dy32);
 +            tz               = _mm256_mul_pd(fscal,dz32);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r33              = _mm256_mul_pd(rsq33,rinv33);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r33,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq33,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq33,FF),_mm256_mul_pd(vftabscale,rinv33)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx33);
 +            ty               = _mm256_mul_pd(fscal,dy33);
 +            tz               = _mm256_mul_pd(fscal,dz33);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_4rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
 +                                                      fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
 +
 +            /* Inner loop uses 446 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_4rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
 +                                                 &jy2,&jz2,&jx3,&jy3,&jz3);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx13             = _mm256_sub_pd(ix1,jx3);
 +            dy13             = _mm256_sub_pd(iy1,jy3);
 +            dz13             = _mm256_sub_pd(iz1,jz3);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +            dx23             = _mm256_sub_pd(ix2,jx3);
 +            dy23             = _mm256_sub_pd(iy2,jy3);
 +            dz23             = _mm256_sub_pd(iz2,jz3);
 +            dx31             = _mm256_sub_pd(ix3,jx1);
 +            dy31             = _mm256_sub_pd(iy3,jy1);
 +            dz31             = _mm256_sub_pd(iz3,jz1);
 +            dx32             = _mm256_sub_pd(ix3,jx2);
 +            dy32             = _mm256_sub_pd(iy3,jy2);
 +            dz32             = _mm256_sub_pd(iz3,jz2);
 +            dx33             = _mm256_sub_pd(ix3,jx3);
 +            dy33             = _mm256_sub_pd(iy3,jy3);
 +            dz33             = _mm256_sub_pd(iz3,jz3);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq13            = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +            rsq23            = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
 +            rsq31            = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
 +            rsq32            = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
 +            rsq33            = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv13           = gmx_mm256_invsqrt_pd(rsq13);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +            rinv23           = gmx_mm256_invsqrt_pd(rsq23);
 +            rinv31           = gmx_mm256_invsqrt_pd(rsq31);
 +            rinv32           = gmx_mm256_invsqrt_pd(rsq32);
 +            rinv33           = gmx_mm256_invsqrt_pd(rsq33);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +            fjx3             = _mm256_setzero_pd();
 +            fjy3             = _mm256_setzero_pd();
 +            fjz3             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE DISPERSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw6            = _mm256_mul_pd(c6_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw6            = _mm256_mul_pd(c6_00,FF);
 +
 +            /* CUBIC SPLINE TABLE REPULSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw12           = _mm256_mul_pd(c12_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw12           = _mm256_mul_pd(c12_00,FF);
 +            vvdw             = _mm256_add_pd(vvdw12,vvdw6);
 +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            vvdw             = _mm256_andnot_pd(dummy_mask,vvdw);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = fvdw;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r11              = _mm256_mul_pd(rsq11,rinv11);
 +            r11              = _mm256_andnot_pd(dummy_mask,r11);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r11,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq11,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq11,FF),_mm256_mul_pd(vftabscale,rinv11)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r12              = _mm256_mul_pd(rsq12,rinv12);
 +            r12              = _mm256_andnot_pd(dummy_mask,r12);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r12,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq12,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq12,FF),_mm256_mul_pd(vftabscale,rinv12)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r13              = _mm256_mul_pd(rsq13,rinv13);
 +            r13              = _mm256_andnot_pd(dummy_mask,r13);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r13,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq13,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq13,FF),_mm256_mul_pd(vftabscale,rinv13)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx13);
 +            ty               = _mm256_mul_pd(fscal,dy13);
 +            tz               = _mm256_mul_pd(fscal,dz13);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r21              = _mm256_mul_pd(rsq21,rinv21);
 +            r21              = _mm256_andnot_pd(dummy_mask,r21);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r21,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq21,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq21,FF),_mm256_mul_pd(vftabscale,rinv21)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r22              = _mm256_mul_pd(rsq22,rinv22);
 +            r22              = _mm256_andnot_pd(dummy_mask,r22);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r22,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq22,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq22,FF),_mm256_mul_pd(vftabscale,rinv22)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r23              = _mm256_mul_pd(rsq23,rinv23);
 +            r23              = _mm256_andnot_pd(dummy_mask,r23);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r23,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq23,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq23,FF),_mm256_mul_pd(vftabscale,rinv23)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx23);
 +            ty               = _mm256_mul_pd(fscal,dy23);
 +            tz               = _mm256_mul_pd(fscal,dz23);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r31              = _mm256_mul_pd(rsq31,rinv31);
 +            r31              = _mm256_andnot_pd(dummy_mask,r31);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r31,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq31,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq31,FF),_mm256_mul_pd(vftabscale,rinv31)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx31);
 +            ty               = _mm256_mul_pd(fscal,dy31);
 +            tz               = _mm256_mul_pd(fscal,dz31);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r32              = _mm256_mul_pd(rsq32,rinv32);
 +            r32              = _mm256_andnot_pd(dummy_mask,r32);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r32,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq32,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq32,FF),_mm256_mul_pd(vftabscale,rinv32)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx32);
 +            ty               = _mm256_mul_pd(fscal,dy32);
 +            tz               = _mm256_mul_pd(fscal,dz32);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r33              = _mm256_mul_pd(rsq33,rinv33);
 +            r33              = _mm256_andnot_pd(dummy_mask,r33);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r33,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq33,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq33,FF),_mm256_mul_pd(vftabscale,rinv33)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx33);
 +            ty               = _mm256_mul_pd(fscal,dy33);
 +            tz               = _mm256_mul_pd(fscal,dz33);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_4rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
 +                                                      fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
 +
 +            /* Inner loop uses 456 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_4atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
 +        gmx_mm256_update_1pot_pd(vvdwsum,kernel_data->energygrp_vdw+ggid);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 26 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4W4_VF,outeriter*26 + inneriter*456);
 +}
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecCSTab_VdwCSTab_GeomW4W4_F_avx_256_double
 + * Electrostatics interaction: CubicSplineTable
 + * VdW interaction:            CubicSplineTable
 + * Geometry:                   Water4-Water4
 + * Calculate force/pot:        Force
 + */
 +void
 +nb_kernel_ElecCSTab_VdwCSTab_GeomW4W4_F_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    real *           vdwioffsetptr3;
 +    __m256d          ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
 +    __m256d          jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
 +    int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
 +    __m256d          jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
 +    int              vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D;
 +    __m256d          jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
 +    __m256d          dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
 +    __m256d          dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
 +    __m256d          dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
 +    __m256d          dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
 +    __m256d          dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
 +    __m256d          dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
 +    __m256d          dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
 +    __m256d          dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m128i          vfitab;
 +    __m128i          ifour       = _mm_set1_epi32(4);
 +    __m256d          rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
 +    real             *vftab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    vftab            = kernel_data->table_elec_vdw->data;
 +    vftabscale       = _mm256_set1_pd(kernel_data->table_elec_vdw->scale);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    iq3              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+3]));
 +    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +    jq1              = _mm256_set1_pd(charge[inr+1]);
 +    jq2              = _mm256_set1_pd(charge[inr+2]);
 +    jq3              = _mm256_set1_pd(charge[inr+3]);
 +    vdwjidx0A        = 2*vdwtype[inr+0];
 +    c6_00            = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A]);
 +    c12_00           = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A+1]);
 +    qq11             = _mm256_mul_pd(iq1,jq1);
 +    qq12             = _mm256_mul_pd(iq1,jq2);
 +    qq13             = _mm256_mul_pd(iq1,jq3);
 +    qq21             = _mm256_mul_pd(iq2,jq1);
 +    qq22             = _mm256_mul_pd(iq2,jq2);
 +    qq23             = _mm256_mul_pd(iq2,jq3);
 +    qq31             = _mm256_mul_pd(iq3,jq1);
 +    qq32             = _mm256_mul_pd(iq3,jq2);
 +    qq33             = _mm256_mul_pd(iq3,jq3);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_4rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +        fix3             = _mm256_setzero_pd();
 +        fiy3             = _mm256_setzero_pd();
 +        fiz3             = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_4rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
 +                                                 &jy2,&jz2,&jx3,&jy3,&jz3);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx13             = _mm256_sub_pd(ix1,jx3);
 +            dy13             = _mm256_sub_pd(iy1,jy3);
 +            dz13             = _mm256_sub_pd(iz1,jz3);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +            dx23             = _mm256_sub_pd(ix2,jx3);
 +            dy23             = _mm256_sub_pd(iy2,jy3);
 +            dz23             = _mm256_sub_pd(iz2,jz3);
 +            dx31             = _mm256_sub_pd(ix3,jx1);
 +            dy31             = _mm256_sub_pd(iy3,jy1);
 +            dz31             = _mm256_sub_pd(iz3,jz1);
 +            dx32             = _mm256_sub_pd(ix3,jx2);
 +            dy32             = _mm256_sub_pd(iy3,jy2);
 +            dz32             = _mm256_sub_pd(iz3,jz2);
 +            dx33             = _mm256_sub_pd(ix3,jx3);
 +            dy33             = _mm256_sub_pd(iy3,jy3);
 +            dz33             = _mm256_sub_pd(iz3,jz3);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq13            = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +            rsq23            = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
 +            rsq31            = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
 +            rsq32            = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
 +            rsq33            = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv13           = gmx_mm256_invsqrt_pd(rsq13);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +            rinv23           = gmx_mm256_invsqrt_pd(rsq23);
 +            rinv31           = gmx_mm256_invsqrt_pd(rsq31);
 +            rinv32           = gmx_mm256_invsqrt_pd(rsq32);
 +            rinv33           = gmx_mm256_invsqrt_pd(rsq33);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +            fjx3             = _mm256_setzero_pd();
 +            fjy3             = _mm256_setzero_pd();
 +            fjz3             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE DISPERSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw6            = _mm256_mul_pd(c6_00,FF);
 +
 +            /* CUBIC SPLINE TABLE REPULSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw12           = _mm256_mul_pd(c12_00,FF);
 +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            fscal            = fvdw;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r11              = _mm256_mul_pd(rsq11,rinv11);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r11,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq11,FF),_mm256_mul_pd(vftabscale,rinv11)));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r12              = _mm256_mul_pd(rsq12,rinv12);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r12,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq12,FF),_mm256_mul_pd(vftabscale,rinv12)));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r13              = _mm256_mul_pd(rsq13,rinv13);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r13,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq13,FF),_mm256_mul_pd(vftabscale,rinv13)));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx13);
 +            ty               = _mm256_mul_pd(fscal,dy13);
 +            tz               = _mm256_mul_pd(fscal,dz13);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r21              = _mm256_mul_pd(rsq21,rinv21);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r21,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq21,FF),_mm256_mul_pd(vftabscale,rinv21)));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r22              = _mm256_mul_pd(rsq22,rinv22);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r22,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq22,FF),_mm256_mul_pd(vftabscale,rinv22)));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r23              = _mm256_mul_pd(rsq23,rinv23);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r23,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq23,FF),_mm256_mul_pd(vftabscale,rinv23)));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx23);
 +            ty               = _mm256_mul_pd(fscal,dy23);
 +            tz               = _mm256_mul_pd(fscal,dz23);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r31              = _mm256_mul_pd(rsq31,rinv31);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r31,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq31,FF),_mm256_mul_pd(vftabscale,rinv31)));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx31);
 +            ty               = _mm256_mul_pd(fscal,dy31);
 +            tz               = _mm256_mul_pd(fscal,dz31);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r32              = _mm256_mul_pd(rsq32,rinv32);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r32,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq32,FF),_mm256_mul_pd(vftabscale,rinv32)));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx32);
 +            ty               = _mm256_mul_pd(fscal,dy32);
 +            tz               = _mm256_mul_pd(fscal,dz32);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r33              = _mm256_mul_pd(rsq33,rinv33);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r33,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq33,FF),_mm256_mul_pd(vftabscale,rinv33)));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx33);
 +            ty               = _mm256_mul_pd(fscal,dy33);
 +            tz               = _mm256_mul_pd(fscal,dz33);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_4rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
 +                                                      fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
 +
 +            /* Inner loop uses 402 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_4rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
 +                                                 &jy2,&jz2,&jx3,&jy3,&jz3);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx13             = _mm256_sub_pd(ix1,jx3);
 +            dy13             = _mm256_sub_pd(iy1,jy3);
 +            dz13             = _mm256_sub_pd(iz1,jz3);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +            dx23             = _mm256_sub_pd(ix2,jx3);
 +            dy23             = _mm256_sub_pd(iy2,jy3);
 +            dz23             = _mm256_sub_pd(iz2,jz3);
 +            dx31             = _mm256_sub_pd(ix3,jx1);
 +            dy31             = _mm256_sub_pd(iy3,jy1);
 +            dz31             = _mm256_sub_pd(iz3,jz1);
 +            dx32             = _mm256_sub_pd(ix3,jx2);
 +            dy32             = _mm256_sub_pd(iy3,jy2);
 +            dz32             = _mm256_sub_pd(iz3,jz2);
 +            dx33             = _mm256_sub_pd(ix3,jx3);
 +            dy33             = _mm256_sub_pd(iy3,jy3);
 +            dz33             = _mm256_sub_pd(iz3,jz3);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq13            = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +            rsq23            = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
 +            rsq31            = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
 +            rsq32            = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
 +            rsq33            = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv13           = gmx_mm256_invsqrt_pd(rsq13);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +            rinv23           = gmx_mm256_invsqrt_pd(rsq23);
 +            rinv31           = gmx_mm256_invsqrt_pd(rsq31);
 +            rinv32           = gmx_mm256_invsqrt_pd(rsq32);
 +            rinv33           = gmx_mm256_invsqrt_pd(rsq33);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +            fjx3             = _mm256_setzero_pd();
 +            fjy3             = _mm256_setzero_pd();
 +            fjz3             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE DISPERSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw6            = _mm256_mul_pd(c6_00,FF);
 +
 +            /* CUBIC SPLINE TABLE REPULSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw12           = _mm256_mul_pd(c12_00,FF);
 +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            fscal            = fvdw;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r11              = _mm256_mul_pd(rsq11,rinv11);
 +            r11              = _mm256_andnot_pd(dummy_mask,r11);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r11,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq11,FF),_mm256_mul_pd(vftabscale,rinv11)));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r12              = _mm256_mul_pd(rsq12,rinv12);
 +            r12              = _mm256_andnot_pd(dummy_mask,r12);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r12,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq12,FF),_mm256_mul_pd(vftabscale,rinv12)));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r13              = _mm256_mul_pd(rsq13,rinv13);
 +            r13              = _mm256_andnot_pd(dummy_mask,r13);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r13,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq13,FF),_mm256_mul_pd(vftabscale,rinv13)));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx13);
 +            ty               = _mm256_mul_pd(fscal,dy13);
 +            tz               = _mm256_mul_pd(fscal,dz13);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r21              = _mm256_mul_pd(rsq21,rinv21);
 +            r21              = _mm256_andnot_pd(dummy_mask,r21);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r21,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq21,FF),_mm256_mul_pd(vftabscale,rinv21)));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r22              = _mm256_mul_pd(rsq22,rinv22);
 +            r22              = _mm256_andnot_pd(dummy_mask,r22);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r22,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq22,FF),_mm256_mul_pd(vftabscale,rinv22)));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r23              = _mm256_mul_pd(rsq23,rinv23);
 +            r23              = _mm256_andnot_pd(dummy_mask,r23);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r23,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq23,FF),_mm256_mul_pd(vftabscale,rinv23)));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx23);
 +            ty               = _mm256_mul_pd(fscal,dy23);
 +            tz               = _mm256_mul_pd(fscal,dz23);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r31              = _mm256_mul_pd(rsq31,rinv31);
 +            r31              = _mm256_andnot_pd(dummy_mask,r31);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r31,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq31,FF),_mm256_mul_pd(vftabscale,rinv31)));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx31);
 +            ty               = _mm256_mul_pd(fscal,dy31);
 +            tz               = _mm256_mul_pd(fscal,dz31);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r32              = _mm256_mul_pd(rsq32,rinv32);
 +            r32              = _mm256_andnot_pd(dummy_mask,r32);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r32,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq32,FF),_mm256_mul_pd(vftabscale,rinv32)));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx32);
 +            ty               = _mm256_mul_pd(fscal,dy32);
 +            tz               = _mm256_mul_pd(fscal,dz32);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r33              = _mm256_mul_pd(rsq33,rinv33);
 +            r33              = _mm256_andnot_pd(dummy_mask,r33);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r33,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq33,FF),_mm256_mul_pd(vftabscale,rinv33)));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx33);
 +            ty               = _mm256_mul_pd(fscal,dy33);
 +            tz               = _mm256_mul_pd(fscal,dz33);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_4rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
 +                                                      fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
 +
 +            /* Inner loop uses 412 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_4atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 24 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4W4_F,outeriter*24 + inneriter*412);
 +}
index 79c8cb40536904a082d01961588e506fefa6ae51,0000000000000000000000000000000000000000..ee84ab14bfd3cc240537295c50145b3b0888809e
mode 100644,000000..100644
--- /dev/null
@@@ -1,748 -1,0 +1,748 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/*
 + * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2001-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the Free
 + * Software Foundation; either version 2 of the License, or (at your option) any
 + * later version.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website.
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecCSTab_VdwLJ_GeomP1P1_VF_avx_256_double
 + * Electrostatics interaction: CubicSplineTable
 + * VdW interaction:            LennardJones
 + * Geometry:                   Particle-Particle
 + * Calculate force/pot:        PotentialAndForce
 + */
 +void
 +nb_kernel_ElecCSTab_VdwLJ_GeomP1P1_VF_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m128i          vfitab;
 +    __m128i          ifour       = _mm_set1_epi32(4);
 +    __m256d          rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
 +    real             *vftab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    vftab            = kernel_data->table_elec->data;
 +    vftabscale       = _mm256_set1_pd(kernel_data->table_elec->scale);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_1rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +
 +        /* Load parameters for i particles */
 +        iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +        vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +        /* Reset potential sums */
 +        velecsum         = _mm256_setzero_pd();
 +        vvdwsum          = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq00,FF),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
 +
 +            /* Inner loop uses 56 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq00,FF),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +            vvdw             = _mm256_andnot_pd(dummy_mask,vvdw);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
 +
 +            /* Inner loop uses 57 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_1atom_swizzle_pd(fix0,fiy0,fiz0,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
 +        gmx_mm256_update_1pot_pd(vvdwsum,kernel_data->energygrp_vdw+ggid);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 9 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_VF,outeriter*9 + inneriter*57);
 +}
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecCSTab_VdwLJ_GeomP1P1_F_avx_256_double
 + * Electrostatics interaction: CubicSplineTable
 + * VdW interaction:            LennardJones
 + * Geometry:                   Particle-Particle
 + * Calculate force/pot:        Force
 + */
 +void
 +nb_kernel_ElecCSTab_VdwLJ_GeomP1P1_F_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m128i          vfitab;
 +    __m128i          ifour       = _mm_set1_epi32(4);
 +    __m256d          rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
 +    real             *vftab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    vftab            = kernel_data->table_elec->data;
 +    vftabscale       = _mm256_set1_pd(kernel_data->table_elec->scale);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_1rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +
 +        /* Load parameters for i particles */
 +        iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +        vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq00,FF),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(_mm256_mul_pd(c12_00,rinvsix),c6_00),_mm256_mul_pd(rinvsix,rinvsq00));
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
 +
 +            /* Inner loop uses 47 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq00,FF),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(_mm256_mul_pd(c12_00,rinvsix),c6_00),_mm256_mul_pd(rinvsix,rinvsq00));
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
 +
 +            /* Inner loop uses 48 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_1atom_swizzle_pd(fix0,fiy0,fiz0,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 7 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_F,outeriter*7 + inneriter*48);
 +}
index 2c89d947447f1d09214f2813252a5a7b04689c00,0000000000000000000000000000000000000000..c2fd96e7b96068fc9ffaac5d4365bd4e617e7a81
mode 100644,000000..100644
--- /dev/null
@@@ -1,1226 -1,0 +1,1226 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/*
 + * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2001-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the Free
 + * Software Foundation; either version 2 of the License, or (at your option) any
 + * later version.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website.
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecCSTab_VdwLJ_GeomW3P1_VF_avx_256_double
 + * Electrostatics interaction: CubicSplineTable
 + * VdW interaction:            LennardJones
 + * Geometry:                   Water3-Particle
 + * Calculate force/pot:        PotentialAndForce
 + */
 +void
 +nb_kernel_ElecCSTab_VdwLJ_GeomW3P1_VF_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m128i          vfitab;
 +    __m128i          ifour       = _mm_set1_epi32(4);
 +    __m256d          rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
 +    real             *vftab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    vftab            = kernel_data->table_elec->data;
 +    vftabscale       = _mm256_set1_pd(kernel_data->table_elec->scale);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +
 +        /* Reset potential sums */
 +        velecsum         = _mm256_setzero_pd();
 +        vvdwsum          = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq00,FF),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r10,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq10,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq10,FF),_mm256_mul_pd(vftabscale,rinv10)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r20,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq20,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq20,FF),_mm256_mul_pd(vftabscale,rinv20)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 145 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq00,FF),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +            vvdw             = _mm256_andnot_pd(dummy_mask,vvdw);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +            r10              = _mm256_andnot_pd(dummy_mask,r10);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r10,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq10,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq10,FF),_mm256_mul_pd(vftabscale,rinv10)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +            r20              = _mm256_andnot_pd(dummy_mask,r20);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r20,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq20,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq20,FF),_mm256_mul_pd(vftabscale,rinv20)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 148 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
 +        gmx_mm256_update_1pot_pd(vvdwsum,kernel_data->energygrp_vdw+ggid);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 20 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3_VF,outeriter*20 + inneriter*148);
 +}
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecCSTab_VdwLJ_GeomW3P1_F_avx_256_double
 + * Electrostatics interaction: CubicSplineTable
 + * VdW interaction:            LennardJones
 + * Geometry:                   Water3-Particle
 + * Calculate force/pot:        Force
 + */
 +void
 +nb_kernel_ElecCSTab_VdwLJ_GeomW3P1_F_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m128i          vfitab;
 +    __m128i          ifour       = _mm_set1_epi32(4);
 +    __m256d          rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
 +    real             *vftab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    vftab            = kernel_data->table_elec->data;
 +    vftabscale       = _mm256_set1_pd(kernel_data->table_elec->scale);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq00,FF),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(_mm256_mul_pd(c12_00,rinvsix),c6_00),_mm256_mul_pd(rinvsix,rinvsq00));
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r10,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq10,FF),_mm256_mul_pd(vftabscale,rinv10)));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r20,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq20,FF),_mm256_mul_pd(vftabscale,rinv20)));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 128 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq00,FF),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(_mm256_mul_pd(c12_00,rinvsix),c6_00),_mm256_mul_pd(rinvsix,rinvsq00));
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +            r10              = _mm256_andnot_pd(dummy_mask,r10);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r10,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq10,FF),_mm256_mul_pd(vftabscale,rinv10)));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +            r20              = _mm256_andnot_pd(dummy_mask,r20);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r20,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq20,FF),_mm256_mul_pd(vftabscale,rinv20)));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 131 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 18 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3_F,outeriter*18 + inneriter*131);
 +}
index 576abdd9eeed4ee6eb033fb30c8b0c61a58e440d,0000000000000000000000000000000000000000..61771c8dcb890be72fdc9cb7a784dd99f58be283
mode 100644,000000..100644
--- /dev/null
@@@ -1,2376 -1,0 +1,2376 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/*
 + * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2001-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the Free
 + * Software Foundation; either version 2 of the License, or (at your option) any
 + * later version.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website.
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecCSTab_VdwLJ_GeomW3W3_VF_avx_256_double
 + * Electrostatics interaction: CubicSplineTable
 + * VdW interaction:            LennardJones
 + * Geometry:                   Water3-Water3
 + * Calculate force/pot:        PotentialAndForce
 + */
 +void
 +nb_kernel_ElecCSTab_VdwLJ_GeomW3W3_VF_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
 +    __m256d          jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
 +    int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
 +    __m256d          jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
 +    __m256d          dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
 +    __m256d          dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
 +    __m256d          dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m128i          vfitab;
 +    __m128i          ifour       = _mm_set1_epi32(4);
 +    __m256d          rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
 +    real             *vftab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    vftab            = kernel_data->table_elec->data;
 +    vftabscale       = _mm256_set1_pd(kernel_data->table_elec->scale);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +    jq0              = _mm256_set1_pd(charge[inr+0]);
 +    jq1              = _mm256_set1_pd(charge[inr+1]);
 +    jq2              = _mm256_set1_pd(charge[inr+2]);
 +    vdwjidx0A        = 2*vdwtype[inr+0];
 +    qq00             = _mm256_mul_pd(iq0,jq0);
 +    c6_00            = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A]);
 +    c12_00           = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A+1]);
 +    qq01             = _mm256_mul_pd(iq0,jq1);
 +    qq02             = _mm256_mul_pd(iq0,jq2);
 +    qq10             = _mm256_mul_pd(iq1,jq0);
 +    qq11             = _mm256_mul_pd(iq1,jq1);
 +    qq12             = _mm256_mul_pd(iq1,jq2);
 +    qq20             = _mm256_mul_pd(iq2,jq0);
 +    qq21             = _mm256_mul_pd(iq2,jq1);
 +    qq22             = _mm256_mul_pd(iq2,jq2);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +
 +        /* Reset potential sums */
 +        velecsum         = _mm256_setzero_pd();
 +        vvdwsum          = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                              &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx01             = _mm256_sub_pd(ix0,jx1);
 +            dy01             = _mm256_sub_pd(iy0,jy1);
 +            dz01             = _mm256_sub_pd(iz0,jz1);
 +            dx02             = _mm256_sub_pd(ix0,jx2);
 +            dy02             = _mm256_sub_pd(iy0,jy2);
 +            dz02             = _mm256_sub_pd(iz0,jz2);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
 +            rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv01           = gmx_mm256_invsqrt_pd(rsq01);
 +            rinv02           = gmx_mm256_invsqrt_pd(rsq02);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq00,FF),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r01              = _mm256_mul_pd(rsq01,rinv01);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r01,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq01,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq01,FF),_mm256_mul_pd(vftabscale,rinv01)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx01);
 +            ty               = _mm256_mul_pd(fscal,dy01);
 +            tz               = _mm256_mul_pd(fscal,dz01);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r02              = _mm256_mul_pd(rsq02,rinv02);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r02,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq02,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq02,FF),_mm256_mul_pd(vftabscale,rinv02)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx02);
 +            ty               = _mm256_mul_pd(fscal,dy02);
 +            tz               = _mm256_mul_pd(fscal,dz02);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r10,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq10,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq10,FF),_mm256_mul_pd(vftabscale,rinv10)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r11              = _mm256_mul_pd(rsq11,rinv11);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r11,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq11,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq11,FF),_mm256_mul_pd(vftabscale,rinv11)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r12              = _mm256_mul_pd(rsq12,rinv12);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r12,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq12,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq12,FF),_mm256_mul_pd(vftabscale,rinv12)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r20,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq20,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq20,FF),_mm256_mul_pd(vftabscale,rinv20)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r21              = _mm256_mul_pd(rsq21,rinv21);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r21,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq21,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq21,FF),_mm256_mul_pd(vftabscale,rinv21)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r22              = _mm256_mul_pd(rsq22,rinv22);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r22,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq22,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq22,FF),_mm256_mul_pd(vftabscale,rinv22)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
 +
 +            /* Inner loop uses 400 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                              &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx01             = _mm256_sub_pd(ix0,jx1);
 +            dy01             = _mm256_sub_pd(iy0,jy1);
 +            dz01             = _mm256_sub_pd(iz0,jz1);
 +            dx02             = _mm256_sub_pd(ix0,jx2);
 +            dy02             = _mm256_sub_pd(iy0,jy2);
 +            dz02             = _mm256_sub_pd(iz0,jz2);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
 +            rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv01           = gmx_mm256_invsqrt_pd(rsq01);
 +            rinv02           = gmx_mm256_invsqrt_pd(rsq02);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq00,FF),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +            vvdw             = _mm256_andnot_pd(dummy_mask,vvdw);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r01              = _mm256_mul_pd(rsq01,rinv01);
 +            r01              = _mm256_andnot_pd(dummy_mask,r01);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r01,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq01,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq01,FF),_mm256_mul_pd(vftabscale,rinv01)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx01);
 +            ty               = _mm256_mul_pd(fscal,dy01);
 +            tz               = _mm256_mul_pd(fscal,dz01);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r02              = _mm256_mul_pd(rsq02,rinv02);
 +            r02              = _mm256_andnot_pd(dummy_mask,r02);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r02,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq02,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq02,FF),_mm256_mul_pd(vftabscale,rinv02)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx02);
 +            ty               = _mm256_mul_pd(fscal,dy02);
 +            tz               = _mm256_mul_pd(fscal,dz02);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +            r10              = _mm256_andnot_pd(dummy_mask,r10);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r10,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq10,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq10,FF),_mm256_mul_pd(vftabscale,rinv10)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r11              = _mm256_mul_pd(rsq11,rinv11);
 +            r11              = _mm256_andnot_pd(dummy_mask,r11);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r11,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq11,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq11,FF),_mm256_mul_pd(vftabscale,rinv11)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r12              = _mm256_mul_pd(rsq12,rinv12);
 +            r12              = _mm256_andnot_pd(dummy_mask,r12);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r12,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq12,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq12,FF),_mm256_mul_pd(vftabscale,rinv12)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +            r20              = _mm256_andnot_pd(dummy_mask,r20);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r20,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq20,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq20,FF),_mm256_mul_pd(vftabscale,rinv20)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r21              = _mm256_mul_pd(rsq21,rinv21);
 +            r21              = _mm256_andnot_pd(dummy_mask,r21);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r21,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq21,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq21,FF),_mm256_mul_pd(vftabscale,rinv21)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r22              = _mm256_mul_pd(rsq22,rinv22);
 +            r22              = _mm256_andnot_pd(dummy_mask,r22);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r22,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq22,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq22,FF),_mm256_mul_pd(vftabscale,rinv22)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
 +
 +            /* Inner loop uses 409 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
 +        gmx_mm256_update_1pot_pd(vvdwsum,kernel_data->energygrp_vdw+ggid);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 20 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3W3_VF,outeriter*20 + inneriter*409);
 +}
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecCSTab_VdwLJ_GeomW3W3_F_avx_256_double
 + * Electrostatics interaction: CubicSplineTable
 + * VdW interaction:            LennardJones
 + * Geometry:                   Water3-Water3
 + * Calculate force/pot:        Force
 + */
 +void
 +nb_kernel_ElecCSTab_VdwLJ_GeomW3W3_F_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
 +    __m256d          jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
 +    int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
 +    __m256d          jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
 +    __m256d          dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
 +    __m256d          dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
 +    __m256d          dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m128i          vfitab;
 +    __m128i          ifour       = _mm_set1_epi32(4);
 +    __m256d          rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
 +    real             *vftab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    vftab            = kernel_data->table_elec->data;
 +    vftabscale       = _mm256_set1_pd(kernel_data->table_elec->scale);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +    jq0              = _mm256_set1_pd(charge[inr+0]);
 +    jq1              = _mm256_set1_pd(charge[inr+1]);
 +    jq2              = _mm256_set1_pd(charge[inr+2]);
 +    vdwjidx0A        = 2*vdwtype[inr+0];
 +    qq00             = _mm256_mul_pd(iq0,jq0);
 +    c6_00            = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A]);
 +    c12_00           = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A+1]);
 +    qq01             = _mm256_mul_pd(iq0,jq1);
 +    qq02             = _mm256_mul_pd(iq0,jq2);
 +    qq10             = _mm256_mul_pd(iq1,jq0);
 +    qq11             = _mm256_mul_pd(iq1,jq1);
 +    qq12             = _mm256_mul_pd(iq1,jq2);
 +    qq20             = _mm256_mul_pd(iq2,jq0);
 +    qq21             = _mm256_mul_pd(iq2,jq1);
 +    qq22             = _mm256_mul_pd(iq2,jq2);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                              &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx01             = _mm256_sub_pd(ix0,jx1);
 +            dy01             = _mm256_sub_pd(iy0,jy1);
 +            dz01             = _mm256_sub_pd(iz0,jz1);
 +            dx02             = _mm256_sub_pd(ix0,jx2);
 +            dy02             = _mm256_sub_pd(iy0,jy2);
 +            dz02             = _mm256_sub_pd(iz0,jz2);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
 +            rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv01           = gmx_mm256_invsqrt_pd(rsq01);
 +            rinv02           = gmx_mm256_invsqrt_pd(rsq02);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq00,FF),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(_mm256_mul_pd(c12_00,rinvsix),c6_00),_mm256_mul_pd(rinvsix,rinvsq00));
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r01              = _mm256_mul_pd(rsq01,rinv01);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r01,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq01,FF),_mm256_mul_pd(vftabscale,rinv01)));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx01);
 +            ty               = _mm256_mul_pd(fscal,dy01);
 +            tz               = _mm256_mul_pd(fscal,dz01);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r02              = _mm256_mul_pd(rsq02,rinv02);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r02,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq02,FF),_mm256_mul_pd(vftabscale,rinv02)));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx02);
 +            ty               = _mm256_mul_pd(fscal,dy02);
 +            tz               = _mm256_mul_pd(fscal,dz02);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r10,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq10,FF),_mm256_mul_pd(vftabscale,rinv10)));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r11              = _mm256_mul_pd(rsq11,rinv11);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r11,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq11,FF),_mm256_mul_pd(vftabscale,rinv11)));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r12              = _mm256_mul_pd(rsq12,rinv12);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r12,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq12,FF),_mm256_mul_pd(vftabscale,rinv12)));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r20,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq20,FF),_mm256_mul_pd(vftabscale,rinv20)));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r21              = _mm256_mul_pd(rsq21,rinv21);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r21,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq21,FF),_mm256_mul_pd(vftabscale,rinv21)));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r22              = _mm256_mul_pd(rsq22,rinv22);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r22,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq22,FF),_mm256_mul_pd(vftabscale,rinv22)));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
 +
 +            /* Inner loop uses 359 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                              &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx01             = _mm256_sub_pd(ix0,jx1);
 +            dy01             = _mm256_sub_pd(iy0,jy1);
 +            dz01             = _mm256_sub_pd(iz0,jz1);
 +            dx02             = _mm256_sub_pd(ix0,jx2);
 +            dy02             = _mm256_sub_pd(iy0,jy2);
 +            dz02             = _mm256_sub_pd(iz0,jz2);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
 +            rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv01           = gmx_mm256_invsqrt_pd(rsq01);
 +            rinv02           = gmx_mm256_invsqrt_pd(rsq02);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq00,FF),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(_mm256_mul_pd(c12_00,rinvsix),c6_00),_mm256_mul_pd(rinvsix,rinvsq00));
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r01              = _mm256_mul_pd(rsq01,rinv01);
 +            r01              = _mm256_andnot_pd(dummy_mask,r01);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r01,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq01,FF),_mm256_mul_pd(vftabscale,rinv01)));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx01);
 +            ty               = _mm256_mul_pd(fscal,dy01);
 +            tz               = _mm256_mul_pd(fscal,dz01);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r02              = _mm256_mul_pd(rsq02,rinv02);
 +            r02              = _mm256_andnot_pd(dummy_mask,r02);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r02,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq02,FF),_mm256_mul_pd(vftabscale,rinv02)));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx02);
 +            ty               = _mm256_mul_pd(fscal,dy02);
 +            tz               = _mm256_mul_pd(fscal,dz02);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +            r10              = _mm256_andnot_pd(dummy_mask,r10);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r10,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq10,FF),_mm256_mul_pd(vftabscale,rinv10)));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r11              = _mm256_mul_pd(rsq11,rinv11);
 +            r11              = _mm256_andnot_pd(dummy_mask,r11);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r11,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq11,FF),_mm256_mul_pd(vftabscale,rinv11)));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r12              = _mm256_mul_pd(rsq12,rinv12);
 +            r12              = _mm256_andnot_pd(dummy_mask,r12);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r12,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq12,FF),_mm256_mul_pd(vftabscale,rinv12)));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +            r20              = _mm256_andnot_pd(dummy_mask,r20);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r20,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq20,FF),_mm256_mul_pd(vftabscale,rinv20)));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r21              = _mm256_mul_pd(rsq21,rinv21);
 +            r21              = _mm256_andnot_pd(dummy_mask,r21);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r21,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq21,FF),_mm256_mul_pd(vftabscale,rinv21)));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r22              = _mm256_mul_pd(rsq22,rinv22);
 +            r22              = _mm256_andnot_pd(dummy_mask,r22);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r22,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq22,FF),_mm256_mul_pd(vftabscale,rinv22)));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
 +
 +            /* Inner loop uses 368 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 18 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3W3_F,outeriter*18 + inneriter*368);
 +}
index e2aacee8cf2543708cfd65d65d1756430a0e8e11,0000000000000000000000000000000000000000..c6846406394de297b298dd20095a425256757ff8
mode 100644,000000..100644
--- /dev/null
@@@ -1,1350 -1,0 +1,1350 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/*
 + * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2001-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the Free
 + * Software Foundation; either version 2 of the License, or (at your option) any
 + * later version.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website.
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecCSTab_VdwLJ_GeomW4P1_VF_avx_256_double
 + * Electrostatics interaction: CubicSplineTable
 + * VdW interaction:            LennardJones
 + * Geometry:                   Water4-Particle
 + * Calculate force/pot:        PotentialAndForce
 + */
 +void
 +nb_kernel_ElecCSTab_VdwLJ_GeomW4P1_VF_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    real *           vdwioffsetptr3;
 +    __m256d          ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          dx30,dy30,dz30,rsq30,rinv30,rinvsq30,r30,qq30,c6_30,c12_30;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m128i          vfitab;
 +    __m128i          ifour       = _mm_set1_epi32(4);
 +    __m256d          rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
 +    real             *vftab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    vftab            = kernel_data->table_elec->data;
 +    vftabscale       = _mm256_set1_pd(kernel_data->table_elec->scale);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    iq3              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+3]));
 +    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_4rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +        fix3             = _mm256_setzero_pd();
 +        fiy3             = _mm256_setzero_pd();
 +        fiz3             = _mm256_setzero_pd();
 +
 +        /* Reset potential sums */
 +        velecsum         = _mm256_setzero_pd();
 +        vvdwsum          = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx30             = _mm256_sub_pd(ix3,jx0);
 +            dy30             = _mm256_sub_pd(iy3,jy0);
 +            dz30             = _mm256_sub_pd(iz3,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq30            = gmx_mm256_calc_rsq_pd(dx30,dy30,dz30);
 +
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv30           = gmx_mm256_invsqrt_pd(rsq30);
 +
 +            rinvsq00         = gmx_mm256_inv_pd(rsq00);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = fvdw;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r10,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq10,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq10,FF),_mm256_mul_pd(vftabscale,rinv10)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r20,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq20,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq20,FF),_mm256_mul_pd(vftabscale,rinv20)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r30              = _mm256_mul_pd(rsq30,rinv30);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq30             = _mm256_mul_pd(iq3,jq0);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r30,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq30,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq30,FF),_mm256_mul_pd(vftabscale,rinv30)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx30);
 +            ty               = _mm256_mul_pd(fscal,dy30);
 +            tz               = _mm256_mul_pd(fscal,dz30);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 164 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx30             = _mm256_sub_pd(ix3,jx0);
 +            dy30             = _mm256_sub_pd(iy3,jy0);
 +            dz30             = _mm256_sub_pd(iz3,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq30            = gmx_mm256_calc_rsq_pd(dx30,dy30,dz30);
 +
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv30           = gmx_mm256_invsqrt_pd(rsq30);
 +
 +            rinvsq00         = gmx_mm256_inv_pd(rsq00);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            vvdw             = _mm256_andnot_pd(dummy_mask,vvdw);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = fvdw;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +            r10              = _mm256_andnot_pd(dummy_mask,r10);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r10,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq10,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq10,FF),_mm256_mul_pd(vftabscale,rinv10)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +            r20              = _mm256_andnot_pd(dummy_mask,r20);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r20,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq20,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq20,FF),_mm256_mul_pd(vftabscale,rinv20)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r30              = _mm256_mul_pd(rsq30,rinv30);
 +            r30              = _mm256_andnot_pd(dummy_mask,r30);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq30             = _mm256_mul_pd(iq3,jq0);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r30,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq30,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq30,FF),_mm256_mul_pd(vftabscale,rinv30)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx30);
 +            ty               = _mm256_mul_pd(fscal,dy30);
 +            tz               = _mm256_mul_pd(fscal,dz30);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 167 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_4atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
 +        gmx_mm256_update_1pot_pd(vvdwsum,kernel_data->energygrp_vdw+ggid);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 26 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4_VF,outeriter*26 + inneriter*167);
 +}
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecCSTab_VdwLJ_GeomW4P1_F_avx_256_double
 + * Electrostatics interaction: CubicSplineTable
 + * VdW interaction:            LennardJones
 + * Geometry:                   Water4-Particle
 + * Calculate force/pot:        Force
 + */
 +void
 +nb_kernel_ElecCSTab_VdwLJ_GeomW4P1_F_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    real *           vdwioffsetptr3;
 +    __m256d          ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          dx30,dy30,dz30,rsq30,rinv30,rinvsq30,r30,qq30,c6_30,c12_30;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m128i          vfitab;
 +    __m128i          ifour       = _mm_set1_epi32(4);
 +    __m256d          rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
 +    real             *vftab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    vftab            = kernel_data->table_elec->data;
 +    vftabscale       = _mm256_set1_pd(kernel_data->table_elec->scale);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    iq3              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+3]));
 +    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_4rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +        fix3             = _mm256_setzero_pd();
 +        fiy3             = _mm256_setzero_pd();
 +        fiz3             = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx30             = _mm256_sub_pd(ix3,jx0);
 +            dy30             = _mm256_sub_pd(iy3,jy0);
 +            dz30             = _mm256_sub_pd(iz3,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq30            = gmx_mm256_calc_rsq_pd(dx30,dy30,dz30);
 +
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv30           = gmx_mm256_invsqrt_pd(rsq30);
 +
 +            rinvsq00         = gmx_mm256_inv_pd(rsq00);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(_mm256_mul_pd(c12_00,rinvsix),c6_00),_mm256_mul_pd(rinvsix,rinvsq00));
 +
 +            fscal            = fvdw;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r10,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq10,FF),_mm256_mul_pd(vftabscale,rinv10)));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r20,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq20,FF),_mm256_mul_pd(vftabscale,rinv20)));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r30              = _mm256_mul_pd(rsq30,rinv30);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq30             = _mm256_mul_pd(iq3,jq0);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r30,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq30,FF),_mm256_mul_pd(vftabscale,rinv30)));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx30);
 +            ty               = _mm256_mul_pd(fscal,dy30);
 +            tz               = _mm256_mul_pd(fscal,dz30);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 147 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx30             = _mm256_sub_pd(ix3,jx0);
 +            dy30             = _mm256_sub_pd(iy3,jy0);
 +            dz30             = _mm256_sub_pd(iz3,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq30            = gmx_mm256_calc_rsq_pd(dx30,dy30,dz30);
 +
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv30           = gmx_mm256_invsqrt_pd(rsq30);
 +
 +            rinvsq00         = gmx_mm256_inv_pd(rsq00);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(_mm256_mul_pd(c12_00,rinvsix),c6_00),_mm256_mul_pd(rinvsix,rinvsq00));
 +
 +            fscal            = fvdw;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +            r10              = _mm256_andnot_pd(dummy_mask,r10);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r10,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq10,FF),_mm256_mul_pd(vftabscale,rinv10)));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +            r20              = _mm256_andnot_pd(dummy_mask,r20);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r20,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq20,FF),_mm256_mul_pd(vftabscale,rinv20)));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r30              = _mm256_mul_pd(rsq30,rinv30);
 +            r30              = _mm256_andnot_pd(dummy_mask,r30);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq30             = _mm256_mul_pd(iq3,jq0);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r30,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq30,FF),_mm256_mul_pd(vftabscale,rinv30)));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx30);
 +            ty               = _mm256_mul_pd(fscal,dy30);
 +            tz               = _mm256_mul_pd(fscal,dz30);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 150 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_4atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 24 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4_F,outeriter*24 + inneriter*150);
 +}
index d132597bf164bc3a5c0c5e7c364b18704ca4a9ed,0000000000000000000000000000000000000000..fb78598a585f2127d2bc777dddcbc931ed10963b
mode 100644,000000..100644
--- /dev/null
@@@ -1,2516 -1,0 +1,2516 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/*
 + * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2001-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the Free
 + * Software Foundation; either version 2 of the License, or (at your option) any
 + * later version.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website.
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecCSTab_VdwLJ_GeomW4W4_VF_avx_256_double
 + * Electrostatics interaction: CubicSplineTable
 + * VdW interaction:            LennardJones
 + * Geometry:                   Water4-Water4
 + * Calculate force/pot:        PotentialAndForce
 + */
 +void
 +nb_kernel_ElecCSTab_VdwLJ_GeomW4W4_VF_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    real *           vdwioffsetptr3;
 +    __m256d          ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
 +    __m256d          jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
 +    int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
 +    __m256d          jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
 +    int              vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D;
 +    __m256d          jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
 +    __m256d          dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
 +    __m256d          dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
 +    __m256d          dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
 +    __m256d          dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
 +    __m256d          dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
 +    __m256d          dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
 +    __m256d          dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
 +    __m256d          dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m128i          vfitab;
 +    __m128i          ifour       = _mm_set1_epi32(4);
 +    __m256d          rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
 +    real             *vftab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    vftab            = kernel_data->table_elec->data;
 +    vftabscale       = _mm256_set1_pd(kernel_data->table_elec->scale);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    iq3              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+3]));
 +    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +    jq1              = _mm256_set1_pd(charge[inr+1]);
 +    jq2              = _mm256_set1_pd(charge[inr+2]);
 +    jq3              = _mm256_set1_pd(charge[inr+3]);
 +    vdwjidx0A        = 2*vdwtype[inr+0];
 +    c6_00            = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A]);
 +    c12_00           = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A+1]);
 +    qq11             = _mm256_mul_pd(iq1,jq1);
 +    qq12             = _mm256_mul_pd(iq1,jq2);
 +    qq13             = _mm256_mul_pd(iq1,jq3);
 +    qq21             = _mm256_mul_pd(iq2,jq1);
 +    qq22             = _mm256_mul_pd(iq2,jq2);
 +    qq23             = _mm256_mul_pd(iq2,jq3);
 +    qq31             = _mm256_mul_pd(iq3,jq1);
 +    qq32             = _mm256_mul_pd(iq3,jq2);
 +    qq33             = _mm256_mul_pd(iq3,jq3);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_4rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +        fix3             = _mm256_setzero_pd();
 +        fiy3             = _mm256_setzero_pd();
 +        fiz3             = _mm256_setzero_pd();
 +
 +        /* Reset potential sums */
 +        velecsum         = _mm256_setzero_pd();
 +        vvdwsum          = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_4rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
 +                                                 &jy2,&jz2,&jx3,&jy3,&jz3);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx13             = _mm256_sub_pd(ix1,jx3);
 +            dy13             = _mm256_sub_pd(iy1,jy3);
 +            dz13             = _mm256_sub_pd(iz1,jz3);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +            dx23             = _mm256_sub_pd(ix2,jx3);
 +            dy23             = _mm256_sub_pd(iy2,jy3);
 +            dz23             = _mm256_sub_pd(iz2,jz3);
 +            dx31             = _mm256_sub_pd(ix3,jx1);
 +            dy31             = _mm256_sub_pd(iy3,jy1);
 +            dz31             = _mm256_sub_pd(iz3,jz1);
 +            dx32             = _mm256_sub_pd(ix3,jx2);
 +            dy32             = _mm256_sub_pd(iy3,jy2);
 +            dz32             = _mm256_sub_pd(iz3,jz2);
 +            dx33             = _mm256_sub_pd(ix3,jx3);
 +            dy33             = _mm256_sub_pd(iy3,jy3);
 +            dz33             = _mm256_sub_pd(iz3,jz3);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq13            = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +            rsq23            = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
 +            rsq31            = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
 +            rsq32            = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
 +            rsq33            = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);
 +
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv13           = gmx_mm256_invsqrt_pd(rsq13);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +            rinv23           = gmx_mm256_invsqrt_pd(rsq23);
 +            rinv31           = gmx_mm256_invsqrt_pd(rsq31);
 +            rinv32           = gmx_mm256_invsqrt_pd(rsq32);
 +            rinv33           = gmx_mm256_invsqrt_pd(rsq33);
 +
 +            rinvsq00         = gmx_mm256_inv_pd(rsq00);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +            fjx3             = _mm256_setzero_pd();
 +            fjy3             = _mm256_setzero_pd();
 +            fjz3             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = fvdw;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r11              = _mm256_mul_pd(rsq11,rinv11);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r11,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq11,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq11,FF),_mm256_mul_pd(vftabscale,rinv11)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r12              = _mm256_mul_pd(rsq12,rinv12);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r12,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq12,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq12,FF),_mm256_mul_pd(vftabscale,rinv12)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r13              = _mm256_mul_pd(rsq13,rinv13);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r13,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq13,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq13,FF),_mm256_mul_pd(vftabscale,rinv13)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx13);
 +            ty               = _mm256_mul_pd(fscal,dy13);
 +            tz               = _mm256_mul_pd(fscal,dz13);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r21              = _mm256_mul_pd(rsq21,rinv21);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r21,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq21,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq21,FF),_mm256_mul_pd(vftabscale,rinv21)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r22              = _mm256_mul_pd(rsq22,rinv22);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r22,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq22,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq22,FF),_mm256_mul_pd(vftabscale,rinv22)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r23              = _mm256_mul_pd(rsq23,rinv23);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r23,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq23,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq23,FF),_mm256_mul_pd(vftabscale,rinv23)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx23);
 +            ty               = _mm256_mul_pd(fscal,dy23);
 +            tz               = _mm256_mul_pd(fscal,dz23);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r31              = _mm256_mul_pd(rsq31,rinv31);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r31,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq31,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq31,FF),_mm256_mul_pd(vftabscale,rinv31)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx31);
 +            ty               = _mm256_mul_pd(fscal,dy31);
 +            tz               = _mm256_mul_pd(fscal,dz31);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r32              = _mm256_mul_pd(rsq32,rinv32);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r32,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq32,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq32,FF),_mm256_mul_pd(vftabscale,rinv32)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx32);
 +            ty               = _mm256_mul_pd(fscal,dy32);
 +            tz               = _mm256_mul_pd(fscal,dz32);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r33              = _mm256_mul_pd(rsq33,rinv33);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r33,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq33,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq33,FF),_mm256_mul_pd(vftabscale,rinv33)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx33);
 +            ty               = _mm256_mul_pd(fscal,dy33);
 +            tz               = _mm256_mul_pd(fscal,dz33);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_4rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
 +                                                      fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
 +
 +            /* Inner loop uses 422 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_4rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
 +                                                 &jy2,&jz2,&jx3,&jy3,&jz3);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx13             = _mm256_sub_pd(ix1,jx3);
 +            dy13             = _mm256_sub_pd(iy1,jy3);
 +            dz13             = _mm256_sub_pd(iz1,jz3);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +            dx23             = _mm256_sub_pd(ix2,jx3);
 +            dy23             = _mm256_sub_pd(iy2,jy3);
 +            dz23             = _mm256_sub_pd(iz2,jz3);
 +            dx31             = _mm256_sub_pd(ix3,jx1);
 +            dy31             = _mm256_sub_pd(iy3,jy1);
 +            dz31             = _mm256_sub_pd(iz3,jz1);
 +            dx32             = _mm256_sub_pd(ix3,jx2);
 +            dy32             = _mm256_sub_pd(iy3,jy2);
 +            dz32             = _mm256_sub_pd(iz3,jz2);
 +            dx33             = _mm256_sub_pd(ix3,jx3);
 +            dy33             = _mm256_sub_pd(iy3,jy3);
 +            dz33             = _mm256_sub_pd(iz3,jz3);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq13            = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +            rsq23            = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
 +            rsq31            = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
 +            rsq32            = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
 +            rsq33            = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);
 +
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv13           = gmx_mm256_invsqrt_pd(rsq13);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +            rinv23           = gmx_mm256_invsqrt_pd(rsq23);
 +            rinv31           = gmx_mm256_invsqrt_pd(rsq31);
 +            rinv32           = gmx_mm256_invsqrt_pd(rsq32);
 +            rinv33           = gmx_mm256_invsqrt_pd(rsq33);
 +
 +            rinvsq00         = gmx_mm256_inv_pd(rsq00);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +            fjx3             = _mm256_setzero_pd();
 +            fjy3             = _mm256_setzero_pd();
 +            fjz3             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            vvdw             = _mm256_andnot_pd(dummy_mask,vvdw);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = fvdw;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r11              = _mm256_mul_pd(rsq11,rinv11);
 +            r11              = _mm256_andnot_pd(dummy_mask,r11);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r11,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq11,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq11,FF),_mm256_mul_pd(vftabscale,rinv11)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r12              = _mm256_mul_pd(rsq12,rinv12);
 +            r12              = _mm256_andnot_pd(dummy_mask,r12);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r12,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq12,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq12,FF),_mm256_mul_pd(vftabscale,rinv12)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r13              = _mm256_mul_pd(rsq13,rinv13);
 +            r13              = _mm256_andnot_pd(dummy_mask,r13);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r13,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq13,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq13,FF),_mm256_mul_pd(vftabscale,rinv13)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx13);
 +            ty               = _mm256_mul_pd(fscal,dy13);
 +            tz               = _mm256_mul_pd(fscal,dz13);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r21              = _mm256_mul_pd(rsq21,rinv21);
 +            r21              = _mm256_andnot_pd(dummy_mask,r21);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r21,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq21,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq21,FF),_mm256_mul_pd(vftabscale,rinv21)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r22              = _mm256_mul_pd(rsq22,rinv22);
 +            r22              = _mm256_andnot_pd(dummy_mask,r22);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r22,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq22,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq22,FF),_mm256_mul_pd(vftabscale,rinv22)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r23              = _mm256_mul_pd(rsq23,rinv23);
 +            r23              = _mm256_andnot_pd(dummy_mask,r23);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r23,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq23,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq23,FF),_mm256_mul_pd(vftabscale,rinv23)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx23);
 +            ty               = _mm256_mul_pd(fscal,dy23);
 +            tz               = _mm256_mul_pd(fscal,dz23);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r31              = _mm256_mul_pd(rsq31,rinv31);
 +            r31              = _mm256_andnot_pd(dummy_mask,r31);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r31,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq31,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq31,FF),_mm256_mul_pd(vftabscale,rinv31)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx31);
 +            ty               = _mm256_mul_pd(fscal,dy31);
 +            tz               = _mm256_mul_pd(fscal,dz31);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r32              = _mm256_mul_pd(rsq32,rinv32);
 +            r32              = _mm256_andnot_pd(dummy_mask,r32);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r32,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq32,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq32,FF),_mm256_mul_pd(vftabscale,rinv32)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx32);
 +            ty               = _mm256_mul_pd(fscal,dy32);
 +            tz               = _mm256_mul_pd(fscal,dz32);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r33              = _mm256_mul_pd(rsq33,rinv33);
 +            r33              = _mm256_andnot_pd(dummy_mask,r33);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r33,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq33,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq33,FF),_mm256_mul_pd(vftabscale,rinv33)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx33);
 +            ty               = _mm256_mul_pd(fscal,dy33);
 +            tz               = _mm256_mul_pd(fscal,dz33);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_4rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
 +                                                      fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
 +
 +            /* Inner loop uses 431 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_4atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
 +        gmx_mm256_update_1pot_pd(vvdwsum,kernel_data->energygrp_vdw+ggid);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 26 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4W4_VF,outeriter*26 + inneriter*431);
 +}
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecCSTab_VdwLJ_GeomW4W4_F_avx_256_double
 + * Electrostatics interaction: CubicSplineTable
 + * VdW interaction:            LennardJones
 + * Geometry:                   Water4-Water4
 + * Calculate force/pot:        Force
 + */
 +void
 +nb_kernel_ElecCSTab_VdwLJ_GeomW4W4_F_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    real *           vdwioffsetptr3;
 +    __m256d          ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
 +    __m256d          jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
 +    int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
 +    __m256d          jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
 +    int              vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D;
 +    __m256d          jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
 +    __m256d          dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
 +    __m256d          dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
 +    __m256d          dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
 +    __m256d          dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
 +    __m256d          dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
 +    __m256d          dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
 +    __m256d          dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
 +    __m256d          dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m128i          vfitab;
 +    __m128i          ifour       = _mm_set1_epi32(4);
 +    __m256d          rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
 +    real             *vftab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    vftab            = kernel_data->table_elec->data;
 +    vftabscale       = _mm256_set1_pd(kernel_data->table_elec->scale);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    iq3              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+3]));
 +    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +    jq1              = _mm256_set1_pd(charge[inr+1]);
 +    jq2              = _mm256_set1_pd(charge[inr+2]);
 +    jq3              = _mm256_set1_pd(charge[inr+3]);
 +    vdwjidx0A        = 2*vdwtype[inr+0];
 +    c6_00            = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A]);
 +    c12_00           = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A+1]);
 +    qq11             = _mm256_mul_pd(iq1,jq1);
 +    qq12             = _mm256_mul_pd(iq1,jq2);
 +    qq13             = _mm256_mul_pd(iq1,jq3);
 +    qq21             = _mm256_mul_pd(iq2,jq1);
 +    qq22             = _mm256_mul_pd(iq2,jq2);
 +    qq23             = _mm256_mul_pd(iq2,jq3);
 +    qq31             = _mm256_mul_pd(iq3,jq1);
 +    qq32             = _mm256_mul_pd(iq3,jq2);
 +    qq33             = _mm256_mul_pd(iq3,jq3);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_4rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +        fix3             = _mm256_setzero_pd();
 +        fiy3             = _mm256_setzero_pd();
 +        fiz3             = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_4rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
 +                                                 &jy2,&jz2,&jx3,&jy3,&jz3);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx13             = _mm256_sub_pd(ix1,jx3);
 +            dy13             = _mm256_sub_pd(iy1,jy3);
 +            dz13             = _mm256_sub_pd(iz1,jz3);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +            dx23             = _mm256_sub_pd(ix2,jx3);
 +            dy23             = _mm256_sub_pd(iy2,jy3);
 +            dz23             = _mm256_sub_pd(iz2,jz3);
 +            dx31             = _mm256_sub_pd(ix3,jx1);
 +            dy31             = _mm256_sub_pd(iy3,jy1);
 +            dz31             = _mm256_sub_pd(iz3,jz1);
 +            dx32             = _mm256_sub_pd(ix3,jx2);
 +            dy32             = _mm256_sub_pd(iy3,jy2);
 +            dz32             = _mm256_sub_pd(iz3,jz2);
 +            dx33             = _mm256_sub_pd(ix3,jx3);
 +            dy33             = _mm256_sub_pd(iy3,jy3);
 +            dz33             = _mm256_sub_pd(iz3,jz3);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq13            = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +            rsq23            = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
 +            rsq31            = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
 +            rsq32            = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
 +            rsq33            = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);
 +
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv13           = gmx_mm256_invsqrt_pd(rsq13);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +            rinv23           = gmx_mm256_invsqrt_pd(rsq23);
 +            rinv31           = gmx_mm256_invsqrt_pd(rsq31);
 +            rinv32           = gmx_mm256_invsqrt_pd(rsq32);
 +            rinv33           = gmx_mm256_invsqrt_pd(rsq33);
 +
 +            rinvsq00         = gmx_mm256_inv_pd(rsq00);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +            fjx3             = _mm256_setzero_pd();
 +            fjy3             = _mm256_setzero_pd();
 +            fjz3             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(_mm256_mul_pd(c12_00,rinvsix),c6_00),_mm256_mul_pd(rinvsix,rinvsq00));
 +
 +            fscal            = fvdw;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r11              = _mm256_mul_pd(rsq11,rinv11);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r11,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq11,FF),_mm256_mul_pd(vftabscale,rinv11)));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r12              = _mm256_mul_pd(rsq12,rinv12);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r12,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq12,FF),_mm256_mul_pd(vftabscale,rinv12)));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r13              = _mm256_mul_pd(rsq13,rinv13);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r13,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq13,FF),_mm256_mul_pd(vftabscale,rinv13)));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx13);
 +            ty               = _mm256_mul_pd(fscal,dy13);
 +            tz               = _mm256_mul_pd(fscal,dz13);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r21              = _mm256_mul_pd(rsq21,rinv21);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r21,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq21,FF),_mm256_mul_pd(vftabscale,rinv21)));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r22              = _mm256_mul_pd(rsq22,rinv22);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r22,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq22,FF),_mm256_mul_pd(vftabscale,rinv22)));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r23              = _mm256_mul_pd(rsq23,rinv23);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r23,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq23,FF),_mm256_mul_pd(vftabscale,rinv23)));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx23);
 +            ty               = _mm256_mul_pd(fscal,dy23);
 +            tz               = _mm256_mul_pd(fscal,dz23);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r31              = _mm256_mul_pd(rsq31,rinv31);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r31,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq31,FF),_mm256_mul_pd(vftabscale,rinv31)));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx31);
 +            ty               = _mm256_mul_pd(fscal,dy31);
 +            tz               = _mm256_mul_pd(fscal,dz31);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r32              = _mm256_mul_pd(rsq32,rinv32);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r32,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq32,FF),_mm256_mul_pd(vftabscale,rinv32)));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx32);
 +            ty               = _mm256_mul_pd(fscal,dy32);
 +            tz               = _mm256_mul_pd(fscal,dz32);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r33              = _mm256_mul_pd(rsq33,rinv33);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r33,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq33,FF),_mm256_mul_pd(vftabscale,rinv33)));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx33);
 +            ty               = _mm256_mul_pd(fscal,dy33);
 +            tz               = _mm256_mul_pd(fscal,dz33);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_4rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
 +                                                      fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
 +
 +            /* Inner loop uses 381 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_4rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
 +                                                 &jy2,&jz2,&jx3,&jy3,&jz3);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx13             = _mm256_sub_pd(ix1,jx3);
 +            dy13             = _mm256_sub_pd(iy1,jy3);
 +            dz13             = _mm256_sub_pd(iz1,jz3);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +            dx23             = _mm256_sub_pd(ix2,jx3);
 +            dy23             = _mm256_sub_pd(iy2,jy3);
 +            dz23             = _mm256_sub_pd(iz2,jz3);
 +            dx31             = _mm256_sub_pd(ix3,jx1);
 +            dy31             = _mm256_sub_pd(iy3,jy1);
 +            dz31             = _mm256_sub_pd(iz3,jz1);
 +            dx32             = _mm256_sub_pd(ix3,jx2);
 +            dy32             = _mm256_sub_pd(iy3,jy2);
 +            dz32             = _mm256_sub_pd(iz3,jz2);
 +            dx33             = _mm256_sub_pd(ix3,jx3);
 +            dy33             = _mm256_sub_pd(iy3,jy3);
 +            dz33             = _mm256_sub_pd(iz3,jz3);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq13            = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +            rsq23            = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
 +            rsq31            = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
 +            rsq32            = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
 +            rsq33            = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);
 +
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv13           = gmx_mm256_invsqrt_pd(rsq13);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +            rinv23           = gmx_mm256_invsqrt_pd(rsq23);
 +            rinv31           = gmx_mm256_invsqrt_pd(rsq31);
 +            rinv32           = gmx_mm256_invsqrt_pd(rsq32);
 +            rinv33           = gmx_mm256_invsqrt_pd(rsq33);
 +
 +            rinvsq00         = gmx_mm256_inv_pd(rsq00);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +            fjx3             = _mm256_setzero_pd();
 +            fjy3             = _mm256_setzero_pd();
 +            fjz3             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(_mm256_mul_pd(c12_00,rinvsix),c6_00),_mm256_mul_pd(rinvsix,rinvsq00));
 +
 +            fscal            = fvdw;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r11              = _mm256_mul_pd(rsq11,rinv11);
 +            r11              = _mm256_andnot_pd(dummy_mask,r11);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r11,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq11,FF),_mm256_mul_pd(vftabscale,rinv11)));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r12              = _mm256_mul_pd(rsq12,rinv12);
 +            r12              = _mm256_andnot_pd(dummy_mask,r12);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r12,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq12,FF),_mm256_mul_pd(vftabscale,rinv12)));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r13              = _mm256_mul_pd(rsq13,rinv13);
 +            r13              = _mm256_andnot_pd(dummy_mask,r13);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r13,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq13,FF),_mm256_mul_pd(vftabscale,rinv13)));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx13);
 +            ty               = _mm256_mul_pd(fscal,dy13);
 +            tz               = _mm256_mul_pd(fscal,dz13);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r21              = _mm256_mul_pd(rsq21,rinv21);
 +            r21              = _mm256_andnot_pd(dummy_mask,r21);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r21,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq21,FF),_mm256_mul_pd(vftabscale,rinv21)));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r22              = _mm256_mul_pd(rsq22,rinv22);
 +            r22              = _mm256_andnot_pd(dummy_mask,r22);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r22,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq22,FF),_mm256_mul_pd(vftabscale,rinv22)));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r23              = _mm256_mul_pd(rsq23,rinv23);
 +            r23              = _mm256_andnot_pd(dummy_mask,r23);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r23,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq23,FF),_mm256_mul_pd(vftabscale,rinv23)));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx23);
 +            ty               = _mm256_mul_pd(fscal,dy23);
 +            tz               = _mm256_mul_pd(fscal,dz23);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r31              = _mm256_mul_pd(rsq31,rinv31);
 +            r31              = _mm256_andnot_pd(dummy_mask,r31);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r31,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq31,FF),_mm256_mul_pd(vftabscale,rinv31)));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx31);
 +            ty               = _mm256_mul_pd(fscal,dy31);
 +            tz               = _mm256_mul_pd(fscal,dz31);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r32              = _mm256_mul_pd(rsq32,rinv32);
 +            r32              = _mm256_andnot_pd(dummy_mask,r32);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r32,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq32,FF),_mm256_mul_pd(vftabscale,rinv32)));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx32);
 +            ty               = _mm256_mul_pd(fscal,dy32);
 +            tz               = _mm256_mul_pd(fscal,dz32);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r33              = _mm256_mul_pd(rsq33,rinv33);
 +            r33              = _mm256_andnot_pd(dummy_mask,r33);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r33,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq33,FF),_mm256_mul_pd(vftabscale,rinv33)));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx33);
 +            ty               = _mm256_mul_pd(fscal,dy33);
 +            tz               = _mm256_mul_pd(fscal,dz33);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_4rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
 +                                                      fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
 +
 +            /* Inner loop uses 390 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_4atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 24 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4W4_F,outeriter*24 + inneriter*390);
 +}
index 78d8e130a37a6380be4e8b973521129941762937,0000000000000000000000000000000000000000..0d1fce994cdd80ab59ebf4bb3b728c19ba3f1f7a
mode 100644,000000..100644
--- /dev/null
@@@ -1,653 -1,0 +1,653 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/*
 + * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2001-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the Free
 + * Software Foundation; either version 2 of the License, or (at your option) any
 + * later version.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website.
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecCSTab_VdwNone_GeomP1P1_VF_avx_256_double
 + * Electrostatics interaction: CubicSplineTable
 + * VdW interaction:            None
 + * Geometry:                   Particle-Particle
 + * Calculate force/pot:        PotentialAndForce
 + */
 +void
 +nb_kernel_ElecCSTab_VdwNone_GeomP1P1_VF_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    __m128i          vfitab;
 +    __m128i          ifour       = _mm_set1_epi32(4);
 +    __m256d          rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
 +    real             *vftab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +
 +    vftab            = kernel_data->table_elec->data;
 +    vftabscale       = _mm256_set1_pd(kernel_data->table_elec->scale);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_1rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +
 +        /* Load parameters for i particles */
 +        iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +
 +        /* Reset potential sums */
 +        velecsum         = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq00,FF),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
 +
 +            /* Inner loop uses 43 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq00,FF),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
 +
 +            /* Inner loop uses 44 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_1atom_swizzle_pd(fix0,fiy0,fiz0,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 8 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VF,outeriter*8 + inneriter*44);
 +}
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecCSTab_VdwNone_GeomP1P1_F_avx_256_double
 + * Electrostatics interaction: CubicSplineTable
 + * VdW interaction:            None
 + * Geometry:                   Particle-Particle
 + * Calculate force/pot:        Force
 + */
 +void
 +nb_kernel_ElecCSTab_VdwNone_GeomP1P1_F_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    __m128i          vfitab;
 +    __m128i          ifour       = _mm_set1_epi32(4);
 +    __m256d          rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
 +    real             *vftab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +
 +    vftab            = kernel_data->table_elec->data;
 +    vftabscale       = _mm256_set1_pd(kernel_data->table_elec->scale);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_1rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +
 +        /* Load parameters for i particles */
 +        iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq00,FF),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
 +
 +            /* Inner loop uses 39 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq00,FF),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
 +
 +            /* Inner loop uses 40 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_1atom_swizzle_pd(fix0,fiy0,fiz0,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 7 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_F,outeriter*7 + inneriter*40);
 +}
index ff49f23a3e72c5fdd45351c97895acf115d93662,0000000000000000000000000000000000000000..4ac6dada4978cc86042b3f5fe4b146f043e6dfab
mode 100644,000000..100644
--- /dev/null
@@@ -1,1131 -1,0 +1,1131 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/*
 + * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2001-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the Free
 + * Software Foundation; either version 2 of the License, or (at your option) any
 + * later version.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website.
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecCSTab_VdwNone_GeomW3P1_VF_avx_256_double
 + * Electrostatics interaction: CubicSplineTable
 + * VdW interaction:            None
 + * Geometry:                   Water3-Particle
 + * Calculate force/pot:        PotentialAndForce
 + */
 +void
 +nb_kernel_ElecCSTab_VdwNone_GeomW3P1_VF_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    __m128i          vfitab;
 +    __m128i          ifour       = _mm_set1_epi32(4);
 +    __m256d          rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
 +    real             *vftab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    /* signbit: XOR-ing a double with this flips its sign (used below to negate felec) */
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +
 +    vftab            = kernel_data->table_elec->data;
 +    vftabscale       = _mm256_set1_pd(kernel_data->table_elec->scale);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    /* scratch receives force decrements for padding (dummy) j entries so they are discarded */
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +
 +        /* Reset potential sums */
 +        velecsum         = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        /* Process j atoms four at a time; stop as soon as the group contains a
 +         * negative (padding) jjnr entry - the masked epilogue below handles the rest.
 +         */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            /* <<2: each table point stores four consecutive doubles (Y,F,G,H) */
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            /* Gather one (Y,F,G,H) quadruplet per lane, then transpose so Y,F,G,H
 +             * each hold the four lanes' values.
 +             */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq00,FF),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r10,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq10,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq10,FF),_mm256_mul_pd(vftabscale,rinv10)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r20,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq20,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq20,FF),_mm256_mul_pd(vftabscale,rinv20)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 132 flops */
 +        }
 +
 +        /* Epilogue: final, possibly partial group of j atoms, masked via dummy_mask */
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
- -             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            /* Widen the 4x32-bit integer mask to a 4x64-bit double mask by duplicating lanes */
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            /* Clamp dummy indices to 0 so the loads below stay in bounds */
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            /* Zero r in dummy lanes so the table index stays valid for them */
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq00,FF),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +            r10              = _mm256_andnot_pd(dummy_mask,r10);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r10,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq10,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq10,FF),_mm256_mul_pd(vftabscale,rinv10)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +            r20              = _mm256_andnot_pd(dummy_mask,r20);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r20,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq20,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq20,FF),_mm256_mul_pd(vftabscale,rinv20)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /* Dummy lanes write their (zeroed) force decrement to scratch, not to f */
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 135 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        /* Accumulate the three i-atom force registers into f and fshift */
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 19 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W3_VF,outeriter*19 + inneriter*135);
 +}
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecCSTab_VdwNone_GeomW3P1_F_avx_256_double
 + * Electrostatics interaction: CubicSplineTable
 + * VdW interaction:            None
 + * Geometry:                   Water3-Particle
 + * Calculate force/pot:        Force
 + */
 +/* NOTE(review): machine-generated kernel (Gromacs avx_256_double kernel generator);
 + * prefer regenerating over hand-editing. Comments below were added for review only. */
 +void
 +nb_kernel_ElecCSTab_VdwNone_GeomW3P1_F_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    __m128i          vfitab;
 +    __m128i          ifour       = _mm_set1_epi32(4);
 +    __m256d          rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
 +    real             *vftab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +
 +    vftab            = kernel_data->table_elec->data;
 +    vftabscale       = _mm256_set1_pd(kernel_data->table_elec->scale);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop: 4 j atoms per iteration; the jjnr[jidx+3]>=0 test
 +         * stops before the padded (negative-index) tail, handled in the epilogue below. */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            /* force scalar: -qq*FF*tabscale/r (sign flip done via xor with signbit) */
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq00,FF),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r10,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq10,FF),_mm256_mul_pd(vftabscale,rinv10)));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r20,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq20,FF),_mm256_mul_pd(vftabscale,rinv20)));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 120 flops */
 +        }
 +
 +        /* Epilogue: last 1-3 real j atoms; padded entries have negative jjnr and
 +         * are neutralized via dummy_mask (forces) and the scratch buffer (stores). */
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            /* zero r for dummy entries so the table index below stays in range (same for r10/r20) */
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq00,FF),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +            r10              = _mm256_andnot_pd(dummy_mask,r10);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r10,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq10,FF),_mm256_mul_pd(vftabscale,rinv10)));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +            r20              = _mm256_andnot_pd(dummy_mask,r20);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r20,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq20,FF),_mm256_mul_pd(vftabscale,rinv20)));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /* dummy entries scatter their force into the scratch buffer instead of f[] */
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 123 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 18 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W3_F,outeriter*18 + inneriter*123);
 +}
index 24cb460f5a5f88ca23716b8ab0996a5e73eea34f,0000000000000000000000000000000000000000..6fe4a91917d78ba374261beb51d8a43c3c9cd010
mode 100644,000000..100644
--- /dev/null
@@@ -1,2311 -1,0 +1,2311 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/*
 + * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2001-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the Free
 + * Software Foundation; either version 2 of the License, or (at your option) any
 + * later version.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website.
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecCSTab_VdwNone_GeomW3W3_VF_avx_256_double
 + * Electrostatics interaction: CubicSplineTable
 + * VdW interaction:            None
 + * Geometry:                   Water3-Water3
 + * Calculate force/pot:        PotentialAndForce
 + */
 +void
 +nb_kernel_ElecCSTab_VdwNone_GeomW3W3_VF_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
 +    __m256d          jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
 +    int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
 +    __m256d          jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
 +    __m256d          dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
 +    __m256d          dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
 +    __m256d          dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    __m128i          vfitab;
 +    __m128i          ifour       = _mm_set1_epi32(4);
 +    __m256d          rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
 +    real             *vftab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +
 +    vftab            = kernel_data->table_elec->data;
 +    vftabscale       = _mm256_set1_pd(kernel_data->table_elec->scale);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +
 +    jq0              = _mm256_set1_pd(charge[inr+0]);
 +    jq1              = _mm256_set1_pd(charge[inr+1]);
 +    jq2              = _mm256_set1_pd(charge[inr+2]);
 +    qq00             = _mm256_mul_pd(iq0,jq0);
 +    qq01             = _mm256_mul_pd(iq0,jq1);
 +    qq02             = _mm256_mul_pd(iq0,jq2);
 +    qq10             = _mm256_mul_pd(iq1,jq0);
 +    qq11             = _mm256_mul_pd(iq1,jq1);
 +    qq12             = _mm256_mul_pd(iq1,jq2);
 +    qq20             = _mm256_mul_pd(iq2,jq0);
 +    qq21             = _mm256_mul_pd(iq2,jq1);
 +    qq22             = _mm256_mul_pd(iq2,jq2);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +
 +        /* Reset potential sums */
 +        velecsum         = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                              &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx01             = _mm256_sub_pd(ix0,jx1);
 +            dy01             = _mm256_sub_pd(iy0,jy1);
 +            dz01             = _mm256_sub_pd(iz0,jz1);
 +            dx02             = _mm256_sub_pd(ix0,jx2);
 +            dy02             = _mm256_sub_pd(iy0,jy2);
 +            dz02             = _mm256_sub_pd(iz0,jz2);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
 +            rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv01           = gmx_mm256_invsqrt_pd(rsq01);
 +            rinv02           = gmx_mm256_invsqrt_pd(rsq02);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq00,FF),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r01              = _mm256_mul_pd(rsq01,rinv01);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r01,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq01,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq01,FF),_mm256_mul_pd(vftabscale,rinv01)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx01);
 +            ty               = _mm256_mul_pd(fscal,dy01);
 +            tz               = _mm256_mul_pd(fscal,dz01);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r02              = _mm256_mul_pd(rsq02,rinv02);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r02,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq02,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq02,FF),_mm256_mul_pd(vftabscale,rinv02)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx02);
 +            ty               = _mm256_mul_pd(fscal,dy02);
 +            tz               = _mm256_mul_pd(fscal,dz02);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r10,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq10,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq10,FF),_mm256_mul_pd(vftabscale,rinv10)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r11              = _mm256_mul_pd(rsq11,rinv11);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r11,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq11,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq11,FF),_mm256_mul_pd(vftabscale,rinv11)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r12              = _mm256_mul_pd(rsq12,rinv12);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r12,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq12,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq12,FF),_mm256_mul_pd(vftabscale,rinv12)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r20,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq20,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq20,FF),_mm256_mul_pd(vftabscale,rinv20)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r21              = _mm256_mul_pd(rsq21,rinv21);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r21,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq21,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq21,FF),_mm256_mul_pd(vftabscale,rinv21)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r22              = _mm256_mul_pd(rsq22,rinv22);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r22,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq22,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq22,FF),_mm256_mul_pd(vftabscale,rinv22)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
 +
 +            /* Inner loop uses 387 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                              &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx01             = _mm256_sub_pd(ix0,jx1);
 +            dy01             = _mm256_sub_pd(iy0,jy1);
 +            dz01             = _mm256_sub_pd(iz0,jz1);
 +            dx02             = _mm256_sub_pd(ix0,jx2);
 +            dy02             = _mm256_sub_pd(iy0,jy2);
 +            dz02             = _mm256_sub_pd(iz0,jz2);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
 +            rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv01           = gmx_mm256_invsqrt_pd(rsq01);
 +            rinv02           = gmx_mm256_invsqrt_pd(rsq02);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq00,FF),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r01              = _mm256_mul_pd(rsq01,rinv01);
 +            r01              = _mm256_andnot_pd(dummy_mask,r01);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r01,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq01,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq01,FF),_mm256_mul_pd(vftabscale,rinv01)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx01);
 +            ty               = _mm256_mul_pd(fscal,dy01);
 +            tz               = _mm256_mul_pd(fscal,dz01);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r02              = _mm256_mul_pd(rsq02,rinv02);
 +            r02              = _mm256_andnot_pd(dummy_mask,r02);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r02,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq02,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq02,FF),_mm256_mul_pd(vftabscale,rinv02)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx02);
 +            ty               = _mm256_mul_pd(fscal,dy02);
 +            tz               = _mm256_mul_pd(fscal,dz02);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +            r10              = _mm256_andnot_pd(dummy_mask,r10);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r10,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq10,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq10,FF),_mm256_mul_pd(vftabscale,rinv10)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r11              = _mm256_mul_pd(rsq11,rinv11);
 +            r11              = _mm256_andnot_pd(dummy_mask,r11);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r11,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq11,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq11,FF),_mm256_mul_pd(vftabscale,rinv11)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r12              = _mm256_mul_pd(rsq12,rinv12);
 +            r12              = _mm256_andnot_pd(dummy_mask,r12);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r12,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq12,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq12,FF),_mm256_mul_pd(vftabscale,rinv12)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +            r20              = _mm256_andnot_pd(dummy_mask,r20);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r20,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq20,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq20,FF),_mm256_mul_pd(vftabscale,rinv20)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r21              = _mm256_mul_pd(rsq21,rinv21);
 +            r21              = _mm256_andnot_pd(dummy_mask,r21);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r21,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq21,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq21,FF),_mm256_mul_pd(vftabscale,rinv21)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r22              = _mm256_mul_pd(rsq22,rinv22);
 +            r22              = _mm256_andnot_pd(dummy_mask,r22);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r22,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq22,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq22,FF),_mm256_mul_pd(vftabscale,rinv22)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
 +
 +            /* Inner loop uses 396 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 19 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W3W3_VF,outeriter*19 + inneriter*396);
 +}
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecCSTab_VdwNone_GeomW3W3_F_avx_256_double
 + * Electrostatics interaction: CubicSplineTable
 + * VdW interaction:            None
 + * Geometry:                   Water3-Water3
 + * Calculate force/pot:        Force
 + */
 +void
 +nb_kernel_ElecCSTab_VdwNone_GeomW3W3_F_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
 +    __m256d          jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
 +    int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
 +    __m256d          jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
 +    __m256d          dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
 +    __m256d          dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
 +    __m256d          dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    __m128i          vfitab;
 +    __m128i          ifour       = _mm_set1_epi32(4);
 +    __m256d          rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
 +    real             *vftab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +
 +    vftab            = kernel_data->table_elec->data;
 +    vftabscale       = _mm256_set1_pd(kernel_data->table_elec->scale);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +
 +    jq0              = _mm256_set1_pd(charge[inr+0]);
 +    jq1              = _mm256_set1_pd(charge[inr+1]);
 +    jq2              = _mm256_set1_pd(charge[inr+2]);
 +    qq00             = _mm256_mul_pd(iq0,jq0);
 +    qq01             = _mm256_mul_pd(iq0,jq1);
 +    qq02             = _mm256_mul_pd(iq0,jq2);
 +    qq10             = _mm256_mul_pd(iq1,jq0);
 +    qq11             = _mm256_mul_pd(iq1,jq1);
 +    qq12             = _mm256_mul_pd(iq1,jq2);
 +    qq20             = _mm256_mul_pd(iq2,jq0);
 +    qq21             = _mm256_mul_pd(iq2,jq1);
 +    qq22             = _mm256_mul_pd(iq2,jq2);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                              &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx01             = _mm256_sub_pd(ix0,jx1);
 +            dy01             = _mm256_sub_pd(iy0,jy1);
 +            dz01             = _mm256_sub_pd(iz0,jz1);
 +            dx02             = _mm256_sub_pd(ix0,jx2);
 +            dy02             = _mm256_sub_pd(iy0,jy2);
 +            dz02             = _mm256_sub_pd(iz0,jz2);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
 +            rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv01           = gmx_mm256_invsqrt_pd(rsq01);
 +            rinv02           = gmx_mm256_invsqrt_pd(rsq02);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq00,FF),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r01              = _mm256_mul_pd(rsq01,rinv01);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r01,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq01,FF),_mm256_mul_pd(vftabscale,rinv01)));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx01);
 +            ty               = _mm256_mul_pd(fscal,dy01);
 +            tz               = _mm256_mul_pd(fscal,dz01);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r02              = _mm256_mul_pd(rsq02,rinv02);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r02,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq02,FF),_mm256_mul_pd(vftabscale,rinv02)));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx02);
 +            ty               = _mm256_mul_pd(fscal,dy02);
 +            tz               = _mm256_mul_pd(fscal,dz02);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r10,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq10,FF),_mm256_mul_pd(vftabscale,rinv10)));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r11              = _mm256_mul_pd(rsq11,rinv11);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r11,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq11,FF),_mm256_mul_pd(vftabscale,rinv11)));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r12              = _mm256_mul_pd(rsq12,rinv12);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r12,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq12,FF),_mm256_mul_pd(vftabscale,rinv12)));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r20,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq20,FF),_mm256_mul_pd(vftabscale,rinv20)));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r21              = _mm256_mul_pd(rsq21,rinv21);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r21,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq21,FF),_mm256_mul_pd(vftabscale,rinv21)));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r22              = _mm256_mul_pd(rsq22,rinv22);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r22,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq22,FF),_mm256_mul_pd(vftabscale,rinv22)));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
 +
 +            /* Inner loop uses 351 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                              &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx01             = _mm256_sub_pd(ix0,jx1);
 +            dy01             = _mm256_sub_pd(iy0,jy1);
 +            dz01             = _mm256_sub_pd(iz0,jz1);
 +            dx02             = _mm256_sub_pd(ix0,jx2);
 +            dy02             = _mm256_sub_pd(iy0,jy2);
 +            dz02             = _mm256_sub_pd(iz0,jz2);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
 +            rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv01           = gmx_mm256_invsqrt_pd(rsq01);
 +            rinv02           = gmx_mm256_invsqrt_pd(rsq02);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq00,FF),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r01              = _mm256_mul_pd(rsq01,rinv01);
 +            r01              = _mm256_andnot_pd(dummy_mask,r01);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r01,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq01,FF),_mm256_mul_pd(vftabscale,rinv01)));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx01);
 +            ty               = _mm256_mul_pd(fscal,dy01);
 +            tz               = _mm256_mul_pd(fscal,dz01);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r02              = _mm256_mul_pd(rsq02,rinv02);
 +            r02              = _mm256_andnot_pd(dummy_mask,r02);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r02,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq02,FF),_mm256_mul_pd(vftabscale,rinv02)));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx02);
 +            ty               = _mm256_mul_pd(fscal,dy02);
 +            tz               = _mm256_mul_pd(fscal,dz02);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +            r10              = _mm256_andnot_pd(dummy_mask,r10);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r10,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq10,FF),_mm256_mul_pd(vftabscale,rinv10)));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r11              = _mm256_mul_pd(rsq11,rinv11);
 +            r11              = _mm256_andnot_pd(dummy_mask,r11);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r11,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq11,FF),_mm256_mul_pd(vftabscale,rinv11)));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r12              = _mm256_mul_pd(rsq12,rinv12);
 +            r12              = _mm256_andnot_pd(dummy_mask,r12);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r12,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq12,FF),_mm256_mul_pd(vftabscale,rinv12)));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +            r20              = _mm256_andnot_pd(dummy_mask,r20);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r20,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq20,FF),_mm256_mul_pd(vftabscale,rinv20)));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r21              = _mm256_mul_pd(rsq21,rinv21);
 +            r21              = _mm256_andnot_pd(dummy_mask,r21);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r21,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq21,FF),_mm256_mul_pd(vftabscale,rinv21)));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r22              = _mm256_mul_pd(rsq22,rinv22);
 +            r22              = _mm256_andnot_pd(dummy_mask,r22);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r22,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq22,FF),_mm256_mul_pd(vftabscale,rinv22)));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
 +
 +            /* Inner loop uses 360 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 18 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W3W3_F,outeriter*18 + inneriter*360);
 +}
index f62b5f4f40cbc31ae282837328d9141d166e2b94,0000000000000000000000000000000000000000..f43db1f0e42a51bc56c6dabbebcbbd077338aa5f
mode 100644,000000..100644
--- /dev/null
@@@ -1,1131 -1,0 +1,1131 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/*
 + * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2001-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the Free
 + * Software Foundation; either version 2 of the License, or (at your option) any
 + * later version.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website.
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecCSTab_VdwNone_GeomW4P1_VF_avx_256_double
 + * Electrostatics interaction: CubicSplineTable
 + * VdW interaction:            None
 + * Geometry:                   Water4-Particle
 + * Calculate force/pot:        PotentialAndForce
 + */
 +void
 +nb_kernel_ElecCSTab_VdwNone_GeomW4P1_VF_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    real *           vdwioffsetptr3;
 +    __m256d          ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          dx30,dy30,dz30,rsq30,rinv30,rinvsq30,r30,qq30,c6_30,c12_30;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    __m128i          vfitab;
 +    __m128i          ifour       = _mm_set1_epi32(4);
 +    __m256d          rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
 +    real             *vftab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +
 +    vftab            = kernel_data->table_elec->data;
 +    vftabscale       = _mm256_set1_pd(kernel_data->table_elec->scale);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    iq3              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+3]));
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset+DIM,
 +                                                    &ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
 +
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +        fix3             = _mm256_setzero_pd();
 +        fiy3             = _mm256_setzero_pd();
 +        fiz3             = _mm256_setzero_pd();
 +
 +        /* Reset potential sums */
 +        velecsum         = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx30             = _mm256_sub_pd(ix3,jx0);
 +            dy30             = _mm256_sub_pd(iy3,jy0);
 +            dz30             = _mm256_sub_pd(iz3,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq30            = gmx_mm256_calc_rsq_pd(dx30,dy30,dz30);
 +
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv30           = gmx_mm256_invsqrt_pd(rsq30);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r10,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq10,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq10,FF),_mm256_mul_pd(vftabscale,rinv10)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r20,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq20,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq20,FF),_mm256_mul_pd(vftabscale,rinv20)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r30              = _mm256_mul_pd(rsq30,rinv30);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq30             = _mm256_mul_pd(iq3,jq0);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r30,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq30,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq30,FF),_mm256_mul_pd(vftabscale,rinv30)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx30);
 +            ty               = _mm256_mul_pd(fscal,dy30);
 +            tz               = _mm256_mul_pd(fscal,dz30);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 132 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm256_andnot_pd(mask,val) to clear dummy entries.
 +             */
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx30             = _mm256_sub_pd(ix3,jx0);
 +            dy30             = _mm256_sub_pd(iy3,jy0);
 +            dz30             = _mm256_sub_pd(iz3,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq30            = gmx_mm256_calc_rsq_pd(dx30,dy30,dz30);
 +
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv30           = gmx_mm256_invsqrt_pd(rsq30);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +            r10              = _mm256_andnot_pd(dummy_mask,r10);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r10,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq10,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq10,FF),_mm256_mul_pd(vftabscale,rinv10)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +            r20              = _mm256_andnot_pd(dummy_mask,r20);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r20,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq20,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq20,FF),_mm256_mul_pd(vftabscale,rinv20)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r30              = _mm256_mul_pd(rsq30,rinv30);
 +            r30              = _mm256_andnot_pd(dummy_mask,r30);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq30             = _mm256_mul_pd(iq3,jq0);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r30,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq30,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq30,FF),_mm256_mul_pd(vftabscale,rinv30)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx30);
 +            ty               = _mm256_mul_pd(fscal,dy30);
 +            tz               = _mm256_mul_pd(fscal,dz30);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 135 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
 +                                                 f+i_coord_offset+DIM,fshift+i_shift_offset);
 +
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 19 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W4_VF,outeriter*19 + inneriter*135);
 +}
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecCSTab_VdwNone_GeomW4P1_F_avx_256_double
 + * Electrostatics interaction: CubicSplineTable
 + * VdW interaction:            None
 + * Geometry:                   Water4-Particle
 + * Calculate force/pot:        Force
 + */
 +void
 +nb_kernel_ElecCSTab_VdwNone_GeomW4P1_F_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    real *           vdwioffsetptr3;
 +    __m256d          ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          dx30,dy30,dz30,rsq30,rinv30,rinvsq30,r30,qq30,c6_30,c12_30;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    __m128i          vfitab;
 +    __m128i          ifour       = _mm_set1_epi32(4);
 +    __m256d          rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
 +    real             *vftab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +
 +    vftab            = kernel_data->table_elec->data;
 +    vftabscale       = _mm256_set1_pd(kernel_data->table_elec->scale);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    iq3              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+3]));
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset+DIM,
 +                                                    &ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
 +
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +        fix3             = _mm256_setzero_pd();
 +        fiy3             = _mm256_setzero_pd();
 +        fiz3             = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx30             = _mm256_sub_pd(ix3,jx0);
 +            dy30             = _mm256_sub_pd(iy3,jy0);
 +            dz30             = _mm256_sub_pd(iz3,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq30            = gmx_mm256_calc_rsq_pd(dx30,dy30,dz30);
 +
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv30           = gmx_mm256_invsqrt_pd(rsq30);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r10,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq10,FF),_mm256_mul_pd(vftabscale,rinv10)));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r20,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq20,FF),_mm256_mul_pd(vftabscale,rinv20)));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r30              = _mm256_mul_pd(rsq30,rinv30);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq30             = _mm256_mul_pd(iq3,jq0);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r30,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq30,FF),_mm256_mul_pd(vftabscale,rinv30)));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx30);
 +            ty               = _mm256_mul_pd(fscal,dy30);
 +            tz               = _mm256_mul_pd(fscal,dz30);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 120 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm256_andnot_pd(mask,val) to clear dummy entries.
 +             */
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx30             = _mm256_sub_pd(ix3,jx0);
 +            dy30             = _mm256_sub_pd(iy3,jy0);
 +            dz30             = _mm256_sub_pd(iz3,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq30            = gmx_mm256_calc_rsq_pd(dx30,dy30,dz30);
 +
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv30           = gmx_mm256_invsqrt_pd(rsq30);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +            r10              = _mm256_andnot_pd(dummy_mask,r10);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r10,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq10,FF),_mm256_mul_pd(vftabscale,rinv10)));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +            r20              = _mm256_andnot_pd(dummy_mask,r20);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r20,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq20,FF),_mm256_mul_pd(vftabscale,rinv20)));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r30              = _mm256_mul_pd(rsq30,rinv30);
 +            r30              = _mm256_andnot_pd(dummy_mask,r30);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq30             = _mm256_mul_pd(iq3,jq0);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r30,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq30,FF),_mm256_mul_pd(vftabscale,rinv30)));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx30);
 +            ty               = _mm256_mul_pd(fscal,dy30);
 +            tz               = _mm256_mul_pd(fscal,dz30);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 123 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
 +                                                 f+i_coord_offset+DIM,fshift+i_shift_offset);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 18 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W4_F,outeriter*18 + inneriter*123);
 +}
index e1d8e1edbb79906014d8d6735119d5434d5137b5,0000000000000000000000000000000000000000..1e412c13e1bf64122968ee5bfed9c149c60e4d30
mode 100644,000000..100644
--- /dev/null
@@@ -1,2311 -1,0 +1,2311 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/*
 + * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2001-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the Free
 + * Software Foundation; either version 2 of the License, or (at your option) any
 + * later version.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website.
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecCSTab_VdwNone_GeomW4W4_VF_avx_256_double
 + * Electrostatics interaction: CubicSplineTable
 + * VdW interaction:            None
 + * Geometry:                   Water4-Water4
 + * Calculate force/pot:        PotentialAndForce
 + */
 +void
 +nb_kernel_ElecCSTab_VdwNone_GeomW4W4_VF_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    real *           vdwioffsetptr3;
 +    __m256d          ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
 +    int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
 +    __m256d          jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
 +    int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
 +    __m256d          jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
 +    int              vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D;
 +    __m256d          jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
 +    __m256d          dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
 +    __m256d          dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
 +    __m256d          dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
 +    __m256d          dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
 +    __m256d          dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
 +    __m256d          dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
 +    __m256d          dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
 +    __m256d          dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
 +    __m256d          dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    __m128i          vfitab;
 +    __m128i          ifour       = _mm_set1_epi32(4);
 +    __m256d          rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
 +    real             *vftab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +
 +    vftab            = kernel_data->table_elec->data;
 +    vftabscale       = _mm256_set1_pd(kernel_data->table_elec->scale);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    iq3              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+3]));
 +
 +    jq1              = _mm256_set1_pd(charge[inr+1]);
 +    jq2              = _mm256_set1_pd(charge[inr+2]);
 +    jq3              = _mm256_set1_pd(charge[inr+3]);
 +    qq11             = _mm256_mul_pd(iq1,jq1);
 +    qq12             = _mm256_mul_pd(iq1,jq2);
 +    qq13             = _mm256_mul_pd(iq1,jq3);
 +    qq21             = _mm256_mul_pd(iq2,jq1);
 +    qq22             = _mm256_mul_pd(iq2,jq2);
 +    qq23             = _mm256_mul_pd(iq2,jq3);
 +    qq31             = _mm256_mul_pd(iq3,jq1);
 +    qq32             = _mm256_mul_pd(iq3,jq2);
 +    qq33             = _mm256_mul_pd(iq3,jq3);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset+DIM,
 +                                                    &ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
 +
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +        fix3             = _mm256_setzero_pd();
 +        fiy3             = _mm256_setzero_pd();
 +        fiz3             = _mm256_setzero_pd();
 +
 +        /* Reset potential sums */
 +        velecsum         = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA+DIM,x+j_coord_offsetB+DIM,
 +                                                 x+j_coord_offsetC+DIM,x+j_coord_offsetD+DIM,
 +                                                 &jx1,&jy1,&jz1,&jx2,&jy2,&jz2,&jx3,&jy3,&jz3);
 +
 +            /* Calculate displacement vector */
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx13             = _mm256_sub_pd(ix1,jx3);
 +            dy13             = _mm256_sub_pd(iy1,jy3);
 +            dz13             = _mm256_sub_pd(iz1,jz3);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +            dx23             = _mm256_sub_pd(ix2,jx3);
 +            dy23             = _mm256_sub_pd(iy2,jy3);
 +            dz23             = _mm256_sub_pd(iz2,jz3);
 +            dx31             = _mm256_sub_pd(ix3,jx1);
 +            dy31             = _mm256_sub_pd(iy3,jy1);
 +            dz31             = _mm256_sub_pd(iz3,jz1);
 +            dx32             = _mm256_sub_pd(ix3,jx2);
 +            dy32             = _mm256_sub_pd(iy3,jy2);
 +            dz32             = _mm256_sub_pd(iz3,jz2);
 +            dx33             = _mm256_sub_pd(ix3,jx3);
 +            dy33             = _mm256_sub_pd(iy3,jy3);
 +            dz33             = _mm256_sub_pd(iz3,jz3);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq13            = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +            rsq23            = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
 +            rsq31            = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
 +            rsq32            = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
 +            rsq33            = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);
 +
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv13           = gmx_mm256_invsqrt_pd(rsq13);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +            rinv23           = gmx_mm256_invsqrt_pd(rsq23);
 +            rinv31           = gmx_mm256_invsqrt_pd(rsq31);
 +            rinv32           = gmx_mm256_invsqrt_pd(rsq32);
 +            rinv33           = gmx_mm256_invsqrt_pd(rsq33);
 +
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +            fjx3             = _mm256_setzero_pd();
 +            fjy3             = _mm256_setzero_pd();
 +            fjz3             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r11              = _mm256_mul_pd(rsq11,rinv11);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r11,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq11,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq11,FF),_mm256_mul_pd(vftabscale,rinv11)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r12              = _mm256_mul_pd(rsq12,rinv12);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r12,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq12,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq12,FF),_mm256_mul_pd(vftabscale,rinv12)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r13              = _mm256_mul_pd(rsq13,rinv13);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r13,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq13,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq13,FF),_mm256_mul_pd(vftabscale,rinv13)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx13);
 +            ty               = _mm256_mul_pd(fscal,dy13);
 +            tz               = _mm256_mul_pd(fscal,dz13);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r21              = _mm256_mul_pd(rsq21,rinv21);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r21,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq21,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq21,FF),_mm256_mul_pd(vftabscale,rinv21)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r22              = _mm256_mul_pd(rsq22,rinv22);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r22,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq22,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq22,FF),_mm256_mul_pd(vftabscale,rinv22)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r23              = _mm256_mul_pd(rsq23,rinv23);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r23,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq23,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq23,FF),_mm256_mul_pd(vftabscale,rinv23)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx23);
 +            ty               = _mm256_mul_pd(fscal,dy23);
 +            tz               = _mm256_mul_pd(fscal,dz23);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r31              = _mm256_mul_pd(rsq31,rinv31);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r31,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq31,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq31,FF),_mm256_mul_pd(vftabscale,rinv31)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx31);
 +            ty               = _mm256_mul_pd(fscal,dy31);
 +            tz               = _mm256_mul_pd(fscal,dz31);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r32              = _mm256_mul_pd(rsq32,rinv32);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r32,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq32,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq32,FF),_mm256_mul_pd(vftabscale,rinv32)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx32);
 +            ty               = _mm256_mul_pd(fscal,dy32);
 +            tz               = _mm256_mul_pd(fscal,dz32);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r33              = _mm256_mul_pd(rsq33,rinv33);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r33,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq33,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq33,FF),_mm256_mul_pd(vftabscale,rinv33)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx33);
 +            ty               = _mm256_mul_pd(fscal,dy33);
 +            tz               = _mm256_mul_pd(fscal,dz33);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA+DIM,fjptrB+DIM,fjptrC+DIM,fjptrD+DIM,
 +                                                      fjx1,fjy1,fjz1,fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
 +
 +            /* Inner loop uses 387 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA+DIM,x+j_coord_offsetB+DIM,
 +                                                 x+j_coord_offsetC+DIM,x+j_coord_offsetD+DIM,
 +                                                 &jx1,&jy1,&jz1,&jx2,&jy2,&jz2,&jx3,&jy3,&jz3);
 +
 +            /* Calculate displacement vector */
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx13             = _mm256_sub_pd(ix1,jx3);
 +            dy13             = _mm256_sub_pd(iy1,jy3);
 +            dz13             = _mm256_sub_pd(iz1,jz3);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +            dx23             = _mm256_sub_pd(ix2,jx3);
 +            dy23             = _mm256_sub_pd(iy2,jy3);
 +            dz23             = _mm256_sub_pd(iz2,jz3);
 +            dx31             = _mm256_sub_pd(ix3,jx1);
 +            dy31             = _mm256_sub_pd(iy3,jy1);
 +            dz31             = _mm256_sub_pd(iz3,jz1);
 +            dx32             = _mm256_sub_pd(ix3,jx2);
 +            dy32             = _mm256_sub_pd(iy3,jy2);
 +            dz32             = _mm256_sub_pd(iz3,jz2);
 +            dx33             = _mm256_sub_pd(ix3,jx3);
 +            dy33             = _mm256_sub_pd(iy3,jy3);
 +            dz33             = _mm256_sub_pd(iz3,jz3);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq13            = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +            rsq23            = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
 +            rsq31            = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
 +            rsq32            = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
 +            rsq33            = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);
 +
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv13           = gmx_mm256_invsqrt_pd(rsq13);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +            rinv23           = gmx_mm256_invsqrt_pd(rsq23);
 +            rinv31           = gmx_mm256_invsqrt_pd(rsq31);
 +            rinv32           = gmx_mm256_invsqrt_pd(rsq32);
 +            rinv33           = gmx_mm256_invsqrt_pd(rsq33);
 +
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +            fjx3             = _mm256_setzero_pd();
 +            fjy3             = _mm256_setzero_pd();
 +            fjz3             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r11              = _mm256_mul_pd(rsq11,rinv11);
 +            r11              = _mm256_andnot_pd(dummy_mask,r11);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r11,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq11,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq11,FF),_mm256_mul_pd(vftabscale,rinv11)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r12              = _mm256_mul_pd(rsq12,rinv12);
 +            r12              = _mm256_andnot_pd(dummy_mask,r12);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r12,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq12,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq12,FF),_mm256_mul_pd(vftabscale,rinv12)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r13              = _mm256_mul_pd(rsq13,rinv13);
 +            r13              = _mm256_andnot_pd(dummy_mask,r13);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r13,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq13,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq13,FF),_mm256_mul_pd(vftabscale,rinv13)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx13);
 +            ty               = _mm256_mul_pd(fscal,dy13);
 +            tz               = _mm256_mul_pd(fscal,dz13);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r21              = _mm256_mul_pd(rsq21,rinv21);
 +            r21              = _mm256_andnot_pd(dummy_mask,r21);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r21,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq21,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq21,FF),_mm256_mul_pd(vftabscale,rinv21)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r22              = _mm256_mul_pd(rsq22,rinv22);
 +            r22              = _mm256_andnot_pd(dummy_mask,r22);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r22,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq22,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq22,FF),_mm256_mul_pd(vftabscale,rinv22)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r23              = _mm256_mul_pd(rsq23,rinv23);
 +            r23              = _mm256_andnot_pd(dummy_mask,r23);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r23,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq23,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq23,FF),_mm256_mul_pd(vftabscale,rinv23)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx23);
 +            ty               = _mm256_mul_pd(fscal,dy23);
 +            tz               = _mm256_mul_pd(fscal,dz23);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r31              = _mm256_mul_pd(rsq31,rinv31);
 +            r31              = _mm256_andnot_pd(dummy_mask,r31);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r31,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq31,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq31,FF),_mm256_mul_pd(vftabscale,rinv31)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx31);
 +            ty               = _mm256_mul_pd(fscal,dy31);
 +            tz               = _mm256_mul_pd(fscal,dz31);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r32              = _mm256_mul_pd(rsq32,rinv32);
 +            r32              = _mm256_andnot_pd(dummy_mask,r32);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r32,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq32,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq32,FF),_mm256_mul_pd(vftabscale,rinv32)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx32);
 +            ty               = _mm256_mul_pd(fscal,dy32);
 +            tz               = _mm256_mul_pd(fscal,dz32);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r33              = _mm256_mul_pd(rsq33,rinv33);
 +            r33              = _mm256_andnot_pd(dummy_mask,r33);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r33,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq33,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq33,FF),_mm256_mul_pd(vftabscale,rinv33)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx33);
 +            ty               = _mm256_mul_pd(fscal,dy33);
 +            tz               = _mm256_mul_pd(fscal,dz33);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA+DIM,fjptrB+DIM,fjptrC+DIM,fjptrD+DIM,
 +                                                      fjx1,fjy1,fjz1,fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
 +
 +            /* Inner loop uses 396 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
 +                                                 f+i_coord_offset+DIM,fshift+i_shift_offset);
 +
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 19 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W4W4_VF,outeriter*19 + inneriter*396);
 +}
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecCSTab_VdwNone_GeomW4W4_F_avx_256_double
 + * Electrostatics interaction: CubicSplineTable
 + * VdW interaction:            None
 + * Geometry:                   Water4-Water4
 + * Calculate force/pot:        Force
 + */
 +void
 +nb_kernel_ElecCSTab_VdwNone_GeomW4W4_F_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    real *           vdwioffsetptr3;
 +    __m256d          ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
 +    int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
 +    __m256d          jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
 +    int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
 +    __m256d          jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
 +    int              vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D;
 +    __m256d          jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
 +    __m256d          dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
 +    __m256d          dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
 +    __m256d          dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
 +    __m256d          dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
 +    __m256d          dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
 +    __m256d          dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
 +    __m256d          dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
 +    __m256d          dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
 +    __m256d          dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    __m128i          vfitab;
 +    __m128i          ifour       = _mm_set1_epi32(4);
 +    __m256d          rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
 +    real             *vftab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +
 +    vftab            = kernel_data->table_elec->data;
 +    vftabscale       = _mm256_set1_pd(kernel_data->table_elec->scale);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    iq3              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+3]));
 +
 +    jq1              = _mm256_set1_pd(charge[inr+1]);
 +    jq2              = _mm256_set1_pd(charge[inr+2]);
 +    jq3              = _mm256_set1_pd(charge[inr+3]);
 +    qq11             = _mm256_mul_pd(iq1,jq1);
 +    qq12             = _mm256_mul_pd(iq1,jq2);
 +    qq13             = _mm256_mul_pd(iq1,jq3);
 +    qq21             = _mm256_mul_pd(iq2,jq1);
 +    qq22             = _mm256_mul_pd(iq2,jq2);
 +    qq23             = _mm256_mul_pd(iq2,jq3);
 +    qq31             = _mm256_mul_pd(iq3,jq1);
 +    qq32             = _mm256_mul_pd(iq3,jq2);
 +    qq33             = _mm256_mul_pd(iq3,jq3);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset+DIM,
 +                                                    &ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
 +
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +        fix3             = _mm256_setzero_pd();
 +        fiy3             = _mm256_setzero_pd();
 +        fiz3             = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA+DIM,x+j_coord_offsetB+DIM,
 +                                                 x+j_coord_offsetC+DIM,x+j_coord_offsetD+DIM,
 +                                                 &jx1,&jy1,&jz1,&jx2,&jy2,&jz2,&jx3,&jy3,&jz3);
 +
 +            /* Calculate displacement vector */
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx13             = _mm256_sub_pd(ix1,jx3);
 +            dy13             = _mm256_sub_pd(iy1,jy3);
 +            dz13             = _mm256_sub_pd(iz1,jz3);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +            dx23             = _mm256_sub_pd(ix2,jx3);
 +            dy23             = _mm256_sub_pd(iy2,jy3);
 +            dz23             = _mm256_sub_pd(iz2,jz3);
 +            dx31             = _mm256_sub_pd(ix3,jx1);
 +            dy31             = _mm256_sub_pd(iy3,jy1);
 +            dz31             = _mm256_sub_pd(iz3,jz1);
 +            dx32             = _mm256_sub_pd(ix3,jx2);
 +            dy32             = _mm256_sub_pd(iy3,jy2);
 +            dz32             = _mm256_sub_pd(iz3,jz2);
 +            dx33             = _mm256_sub_pd(ix3,jx3);
 +            dy33             = _mm256_sub_pd(iy3,jy3);
 +            dz33             = _mm256_sub_pd(iz3,jz3);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq13            = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +            rsq23            = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
 +            rsq31            = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
 +            rsq32            = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
 +            rsq33            = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);
 +
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv13           = gmx_mm256_invsqrt_pd(rsq13);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +            rinv23           = gmx_mm256_invsqrt_pd(rsq23);
 +            rinv31           = gmx_mm256_invsqrt_pd(rsq31);
 +            rinv32           = gmx_mm256_invsqrt_pd(rsq32);
 +            rinv33           = gmx_mm256_invsqrt_pd(rsq33);
 +
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +            fjx3             = _mm256_setzero_pd();
 +            fjy3             = _mm256_setzero_pd();
 +            fjz3             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r11              = _mm256_mul_pd(rsq11,rinv11);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r11,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq11,FF),_mm256_mul_pd(vftabscale,rinv11)));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r12              = _mm256_mul_pd(rsq12,rinv12);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r12,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq12,FF),_mm256_mul_pd(vftabscale,rinv12)));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r13              = _mm256_mul_pd(rsq13,rinv13);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r13,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq13,FF),_mm256_mul_pd(vftabscale,rinv13)));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx13);
 +            ty               = _mm256_mul_pd(fscal,dy13);
 +            tz               = _mm256_mul_pd(fscal,dz13);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r21              = _mm256_mul_pd(rsq21,rinv21);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r21,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq21,FF),_mm256_mul_pd(vftabscale,rinv21)));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r22              = _mm256_mul_pd(rsq22,rinv22);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r22,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq22,FF),_mm256_mul_pd(vftabscale,rinv22)));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r23              = _mm256_mul_pd(rsq23,rinv23);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r23,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq23,FF),_mm256_mul_pd(vftabscale,rinv23)));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx23);
 +            ty               = _mm256_mul_pd(fscal,dy23);
 +            tz               = _mm256_mul_pd(fscal,dz23);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r31              = _mm256_mul_pd(rsq31,rinv31);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r31,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq31,FF),_mm256_mul_pd(vftabscale,rinv31)));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx31);
 +            ty               = _mm256_mul_pd(fscal,dy31);
 +            tz               = _mm256_mul_pd(fscal,dz31);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r32              = _mm256_mul_pd(rsq32,rinv32);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r32,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq32,FF),_mm256_mul_pd(vftabscale,rinv32)));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx32);
 +            ty               = _mm256_mul_pd(fscal,dy32);
 +            tz               = _mm256_mul_pd(fscal,dz32);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r33              = _mm256_mul_pd(rsq33,rinv33);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r33,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq33,FF),_mm256_mul_pd(vftabscale,rinv33)));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx33);
 +            ty               = _mm256_mul_pd(fscal,dy33);
 +            tz               = _mm256_mul_pd(fscal,dz33);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA+DIM,fjptrB+DIM,fjptrC+DIM,fjptrD+DIM,
 +                                                      fjx1,fjy1,fjz1,fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
 +
 +            /* Inner loop uses 351 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA+DIM,x+j_coord_offsetB+DIM,
 +                                                 x+j_coord_offsetC+DIM,x+j_coord_offsetD+DIM,
 +                                                 &jx1,&jy1,&jz1,&jx2,&jy2,&jz2,&jx3,&jy3,&jz3);
 +
 +            /* Calculate displacement vector */
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx13             = _mm256_sub_pd(ix1,jx3);
 +            dy13             = _mm256_sub_pd(iy1,jy3);
 +            dz13             = _mm256_sub_pd(iz1,jz3);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +            dx23             = _mm256_sub_pd(ix2,jx3);
 +            dy23             = _mm256_sub_pd(iy2,jy3);
 +            dz23             = _mm256_sub_pd(iz2,jz3);
 +            dx31             = _mm256_sub_pd(ix3,jx1);
 +            dy31             = _mm256_sub_pd(iy3,jy1);
 +            dz31             = _mm256_sub_pd(iz3,jz1);
 +            dx32             = _mm256_sub_pd(ix3,jx2);
 +            dy32             = _mm256_sub_pd(iy3,jy2);
 +            dz32             = _mm256_sub_pd(iz3,jz2);
 +            dx33             = _mm256_sub_pd(ix3,jx3);
 +            dy33             = _mm256_sub_pd(iy3,jy3);
 +            dz33             = _mm256_sub_pd(iz3,jz3);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq13            = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +            rsq23            = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
 +            rsq31            = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
 +            rsq32            = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
 +            rsq33            = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);
 +
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv13           = gmx_mm256_invsqrt_pd(rsq13);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +            rinv23           = gmx_mm256_invsqrt_pd(rsq23);
 +            rinv31           = gmx_mm256_invsqrt_pd(rsq31);
 +            rinv32           = gmx_mm256_invsqrt_pd(rsq32);
 +            rinv33           = gmx_mm256_invsqrt_pd(rsq33);
 +
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +            fjx3             = _mm256_setzero_pd();
 +            fjy3             = _mm256_setzero_pd();
 +            fjz3             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r11              = _mm256_mul_pd(rsq11,rinv11);
 +            r11              = _mm256_andnot_pd(dummy_mask,r11);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r11,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq11,FF),_mm256_mul_pd(vftabscale,rinv11)));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r12              = _mm256_mul_pd(rsq12,rinv12);
 +            r12              = _mm256_andnot_pd(dummy_mask,r12);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r12,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq12,FF),_mm256_mul_pd(vftabscale,rinv12)));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r13              = _mm256_mul_pd(rsq13,rinv13);
 +            r13              = _mm256_andnot_pd(dummy_mask,r13);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r13,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq13,FF),_mm256_mul_pd(vftabscale,rinv13)));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx13);
 +            ty               = _mm256_mul_pd(fscal,dy13);
 +            tz               = _mm256_mul_pd(fscal,dz13);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r21              = _mm256_mul_pd(rsq21,rinv21);
 +            r21              = _mm256_andnot_pd(dummy_mask,r21);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r21,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq21,FF),_mm256_mul_pd(vftabscale,rinv21)));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r22              = _mm256_mul_pd(rsq22,rinv22);
 +            r22              = _mm256_andnot_pd(dummy_mask,r22);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r22,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq22,FF),_mm256_mul_pd(vftabscale,rinv22)));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r23              = _mm256_mul_pd(rsq23,rinv23);
 +            r23              = _mm256_andnot_pd(dummy_mask,r23);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r23,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq23,FF),_mm256_mul_pd(vftabscale,rinv23)));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx23);
 +            ty               = _mm256_mul_pd(fscal,dy23);
 +            tz               = _mm256_mul_pd(fscal,dz23);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r31              = _mm256_mul_pd(rsq31,rinv31);
 +            r31              = _mm256_andnot_pd(dummy_mask,r31);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r31,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq31,FF),_mm256_mul_pd(vftabscale,rinv31)));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx31);
 +            ty               = _mm256_mul_pd(fscal,dy31);
 +            tz               = _mm256_mul_pd(fscal,dz31);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r32              = _mm256_mul_pd(rsq32,rinv32);
 +            r32              = _mm256_andnot_pd(dummy_mask,r32);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r32,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq32,FF),_mm256_mul_pd(vftabscale,rinv32)));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx32);
 +            ty               = _mm256_mul_pd(fscal,dy32);
 +            tz               = _mm256_mul_pd(fscal,dz32);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r33              = _mm256_mul_pd(rsq33,rinv33);
 +            r33              = _mm256_andnot_pd(dummy_mask,r33);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r33,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq33,FF),_mm256_mul_pd(vftabscale,rinv33)));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx33);
 +            ty               = _mm256_mul_pd(fscal,dy33);
 +            tz               = _mm256_mul_pd(fscal,dz33);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA+DIM,fjptrB+DIM,fjptrC+DIM,fjptrD+DIM,
 +                                                      fjx1,fjy1,fjz1,fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
 +
 +            /* Inner loop uses 360 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
 +                                                 f+i_coord_offset+DIM,fshift+i_shift_offset);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 18 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W4W4_F,outeriter*18 + inneriter*360);
 +}
index f8174446f62198f3e05830c14c3b07933b025a71,0000000000000000000000000000000000000000..647e8e72cfc57378190fbdcf17c9fd2668a57036
mode 100644,000000..100644
--- /dev/null
@@@ -1,796 -1,0 +1,796 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/*
 + * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2001-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the Free
 + * Software Foundation; either version 2 of the License, or (at your option) any
 + * later version.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website.
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecCoul_VdwCSTab_GeomP1P1_VF_avx_256_double
 + * Electrostatics interaction: Coulomb
 + * VdW interaction:            CubicSplineTable
 + * Geometry:                   Particle-Particle
 + * Calculate force/pot:        PotentialAndForce
 + */
 +void
 +nb_kernel_ElecCoul_VdwCSTab_GeomP1P1_VF_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m128i          vfitab;
 +    __m128i          ifour       = _mm_set1_epi32(4);
 +    __m256d          rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
 +    real             *vftab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    vftab            = kernel_data->table_vdw->data;
 +    vftabscale       = _mm256_set1_pd(kernel_data->table_vdw->scale);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_1rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +
 +        /* Load parameters for i particles */
 +        iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +        vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +        /* Reset potential sums */
 +        velecsum         = _mm256_setzero_pd();
 +        vvdwsum          = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,3);
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq00,rinv00);
 +            felec            = _mm256_mul_pd(velec,rinvsq00);
 +
 +            /* CUBIC SPLINE TABLE DISPERSION */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw6            = _mm256_mul_pd(c6_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw6            = _mm256_mul_pd(c6_00,FF);
 +
 +            /* CUBIC SPLINE TABLE REPULSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw12           = _mm256_mul_pd(c12_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw12           = _mm256_mul_pd(c12_00,FF);
 +            vvdw             = _mm256_add_pd(vvdw12,vvdw6);
 +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
 +
 +            /* Inner loop uses 62 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,3);
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq00,rinv00);
 +            felec            = _mm256_mul_pd(velec,rinvsq00);
 +
 +            /* CUBIC SPLINE TABLE DISPERSION */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw6            = _mm256_mul_pd(c6_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw6            = _mm256_mul_pd(c6_00,FF);
 +
 +            /* CUBIC SPLINE TABLE REPULSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw12           = _mm256_mul_pd(c12_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw12           = _mm256_mul_pd(c12_00,FF);
 +            vvdw             = _mm256_add_pd(vvdw12,vvdw6);
 +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +            vvdw             = _mm256_andnot_pd(dummy_mask,vvdw);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
 +
 +            /* Inner loop uses 63 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_1atom_swizzle_pd(fix0,fiy0,fiz0,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
 +        gmx_mm256_update_1pot_pd(vvdwsum,kernel_data->energygrp_vdw+ggid);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 9 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_VF,outeriter*9 + inneriter*63);
 +}
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecCoul_VdwCSTab_GeomP1P1_F_avx_256_double
 + * Electrostatics interaction: Coulomb
 + * VdW interaction:            CubicSplineTable
 + * Geometry:                   Particle-Particle
 + * Calculate force/pot:        Force
 + */
 +void
 +nb_kernel_ElecCoul_VdwCSTab_GeomP1P1_F_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m128i          vfitab;
 +    __m128i          ifour       = _mm_set1_epi32(4);
 +    __m256d          rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
 +    real             *vftab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    vftab            = kernel_data->table_vdw->data;
 +    vftabscale       = _mm256_set1_pd(kernel_data->table_vdw->scale);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_1rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +
 +        /* Load parameters for i particles */
 +        iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +        vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,3);
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq00,rinv00);
 +            felec            = _mm256_mul_pd(velec,rinvsq00);
 +
 +            /* CUBIC SPLINE TABLE DISPERSION */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw6            = _mm256_mul_pd(c6_00,FF);
 +
 +            /* CUBIC SPLINE TABLE REPULSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw12           = _mm256_mul_pd(c12_00,FF);
 +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
 +
 +            /* Inner loop uses 53 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,3);
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq00,rinv00);
 +            felec            = _mm256_mul_pd(velec,rinvsq00);
 +
 +            /* CUBIC SPLINE TABLE DISPERSION */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw6            = _mm256_mul_pd(c6_00,FF);
 +
 +            /* CUBIC SPLINE TABLE REPULSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw12           = _mm256_mul_pd(c12_00,FF);
 +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
 +
 +            /* Inner loop uses 54 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_1atom_swizzle_pd(fix0,fiy0,fiz0,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 7 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_F,outeriter*7 + inneriter*54);
 +}
index 663ebb50b7022702530a34f3c7e9980cd907c287,0000000000000000000000000000000000000000..019c9b72b9de522a184191b169325ba9d3a278ff
mode 100644,000000..100644
--- /dev/null
@@@ -1,1150 -1,0 +1,1150 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/*
 + * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2001-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the Free
 + * Software Foundation; either version 2 of the License, or (at your option) any
 + * later version.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website.
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecCoul_VdwCSTab_GeomW3P1_VF_avx_256_double
 + * Electrostatics interaction: Coulomb
 + * VdW interaction:            CubicSplineTable
 + * Geometry:                   Water3-Particle
 + * Calculate force/pot:        PotentialAndForce
 + */
 +void
 +nb_kernel_ElecCoul_VdwCSTab_GeomW3P1_VF_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m128i          vfitab;
 +    __m128i          ifour       = _mm_set1_epi32(4);
 +    __m256d          rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
 +    real             *vftab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    vftab            = kernel_data->table_vdw->data;
 +    vftabscale       = _mm256_set1_pd(kernel_data->table_vdw->scale);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +
 +        /* Reset potential sums */
 +        velecsum         = _mm256_setzero_pd();
 +        vvdwsum          = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,3);
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq00,rinv00);
 +            felec            = _mm256_mul_pd(velec,rinvsq00);
 +
 +            /* CUBIC SPLINE TABLE DISPERSION */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw6            = _mm256_mul_pd(c6_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw6            = _mm256_mul_pd(c6_00,FF);
 +
 +            /* CUBIC SPLINE TABLE REPULSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw12           = _mm256_mul_pd(c12_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw12           = _mm256_mul_pd(c12_00,FF);
 +            vvdw             = _mm256_add_pd(vvdw12,vvdw6);
 +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq10,rinv10);
 +            felec            = _mm256_mul_pd(velec,rinvsq10);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq20,rinv20);
 +            felec            = _mm256_mul_pd(velec,rinvsq20);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 119 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,3);
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq00,rinv00);
 +            felec            = _mm256_mul_pd(velec,rinvsq00);
 +
 +            /* CUBIC SPLINE TABLE DISPERSION */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw6            = _mm256_mul_pd(c6_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw6            = _mm256_mul_pd(c6_00,FF);
 +
 +            /* CUBIC SPLINE TABLE REPULSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw12           = _mm256_mul_pd(c12_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw12           = _mm256_mul_pd(c12_00,FF);
 +            vvdw             = _mm256_add_pd(vvdw12,vvdw6);
 +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +            vvdw             = _mm256_andnot_pd(dummy_mask,vvdw);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq10,rinv10);
 +            felec            = _mm256_mul_pd(velec,rinvsq10);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq20,rinv20);
 +            felec            = _mm256_mul_pd(velec,rinvsq20);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 120 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
 +        gmx_mm256_update_1pot_pd(vvdwsum,kernel_data->energygrp_vdw+ggid);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 20 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3_VF,outeriter*20 + inneriter*120);
 +}
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecCoul_VdwCSTab_GeomW3P1_F_avx_256_double
 + * Electrostatics interaction: Coulomb
 + * VdW interaction:            CubicSplineTable
 + * Geometry:                   Water3-Particle
 + * Calculate force/pot:        Force
 + */
 +void
 +nb_kernel_ElecCoul_VdwCSTab_GeomW3P1_F_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m128i          vfitab;
 +    __m128i          ifour       = _mm_set1_epi32(4);
 +    __m256d          rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
 +    real             *vftab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    vftab            = kernel_data->table_vdw->data;
 +    vftabscale       = _mm256_set1_pd(kernel_data->table_vdw->scale);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,3);
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq00,rinv00);
 +            felec            = _mm256_mul_pd(velec,rinvsq00);
 +
 +            /* CUBIC SPLINE TABLE DISPERSION */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw6            = _mm256_mul_pd(c6_00,FF);
 +
 +            /* CUBIC SPLINE TABLE REPULSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw12           = _mm256_mul_pd(c12_00,FF);
 +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq10,rinv10);
 +            felec            = _mm256_mul_pd(velec,rinvsq10);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq20,rinv20);
 +            felec            = _mm256_mul_pd(velec,rinvsq20);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 108 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,3);
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq00,rinv00);
 +            felec            = _mm256_mul_pd(velec,rinvsq00);
 +
 +            /* CUBIC SPLINE TABLE DISPERSION */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw6            = _mm256_mul_pd(c6_00,FF);
 +
 +            /* CUBIC SPLINE TABLE REPULSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw12           = _mm256_mul_pd(c12_00,FF);
 +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq10,rinv10);
 +            felec            = _mm256_mul_pd(velec,rinvsq10);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq20,rinv20);
 +            felec            = _mm256_mul_pd(velec,rinvsq20);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 109 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 18 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3_F,outeriter*18 + inneriter*109);
 +}
index 3b61a715684bf547e0d631063c8a9630e03614ed,0000000000000000000000000000000000000000..fd6a4d41d6d0b39780747b193895dfe4bdec5201
mode 100644,000000..100644
--- /dev/null
@@@ -1,1928 -1,0 +1,1928 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/*
 + * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2001-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the Free
 + * Software Foundation; either version 2 of the License, or (at your option) any
 + * later version.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website.
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecCoul_VdwCSTab_GeomW3W3_VF_avx_256_double
 + * Electrostatics interaction: Coulomb
 + * VdW interaction:            CubicSplineTable
 + * Geometry:                   Water3-Water3
 + * Calculate force/pot:        PotentialAndForce
 + */
 +void
 +nb_kernel_ElecCoul_VdwCSTab_GeomW3W3_VF_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
 +    __m256d          jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
 +    int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
 +    __m256d          jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
 +    __m256d          dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
 +    __m256d          dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
 +    __m256d          dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m128i          vfitab;
 +    __m128i          ifour       = _mm_set1_epi32(4);
 +    __m256d          rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
 +    real             *vftab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    vftab            = kernel_data->table_vdw->data;
 +    vftabscale       = _mm256_set1_pd(kernel_data->table_vdw->scale);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +    jq0              = _mm256_set1_pd(charge[inr+0]);
 +    jq1              = _mm256_set1_pd(charge[inr+1]);
 +    jq2              = _mm256_set1_pd(charge[inr+2]);
 +    vdwjidx0A        = 2*vdwtype[inr+0];
 +    qq00             = _mm256_mul_pd(iq0,jq0);
 +    c6_00            = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A]);
 +    c12_00           = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A+1]);
 +    qq01             = _mm256_mul_pd(iq0,jq1);
 +    qq02             = _mm256_mul_pd(iq0,jq2);
 +    qq10             = _mm256_mul_pd(iq1,jq0);
 +    qq11             = _mm256_mul_pd(iq1,jq1);
 +    qq12             = _mm256_mul_pd(iq1,jq2);
 +    qq20             = _mm256_mul_pd(iq2,jq0);
 +    qq21             = _mm256_mul_pd(iq2,jq1);
 +    qq22             = _mm256_mul_pd(iq2,jq2);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +
 +        /* Reset potential sums */
 +        velecsum         = _mm256_setzero_pd();
 +        vvdwsum          = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                              &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx01             = _mm256_sub_pd(ix0,jx1);
 +            dy01             = _mm256_sub_pd(iy0,jy1);
 +            dz01             = _mm256_sub_pd(iz0,jz1);
 +            dx02             = _mm256_sub_pd(ix0,jx2);
 +            dy02             = _mm256_sub_pd(iy0,jy2);
 +            dz02             = _mm256_sub_pd(iz0,jz2);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
 +            rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv01           = gmx_mm256_invsqrt_pd(rsq01);
 +            rinv02           = gmx_mm256_invsqrt_pd(rsq02);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq01         = _mm256_mul_pd(rinv01,rinv01);
 +            rinvsq02         = _mm256_mul_pd(rinv02,rinv02);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,3);
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq00,rinv00);
 +            felec            = _mm256_mul_pd(velec,rinvsq00);
 +
 +            /* CUBIC SPLINE TABLE DISPERSION */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw6            = _mm256_mul_pd(c6_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw6            = _mm256_mul_pd(c6_00,FF);
 +
 +            /* CUBIC SPLINE TABLE REPULSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw12           = _mm256_mul_pd(c12_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw12           = _mm256_mul_pd(c12_00,FF);
 +            vvdw             = _mm256_add_pd(vvdw12,vvdw6);
 +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq01,rinv01);
 +            felec            = _mm256_mul_pd(velec,rinvsq01);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx01);
 +            ty               = _mm256_mul_pd(fscal,dy01);
 +            tz               = _mm256_mul_pd(fscal,dz01);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq02,rinv02);
 +            felec            = _mm256_mul_pd(velec,rinvsq02);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx02);
 +            ty               = _mm256_mul_pd(fscal,dy02);
 +            tz               = _mm256_mul_pd(fscal,dz02);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq10,rinv10);
 +            felec            = _mm256_mul_pd(velec,rinvsq10);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq11,rinv11);
 +            felec            = _mm256_mul_pd(velec,rinvsq11);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq12,rinv12);
 +            felec            = _mm256_mul_pd(velec,rinvsq12);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq20,rinv20);
 +            felec            = _mm256_mul_pd(velec,rinvsq20);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq21,rinv21);
 +            felec            = _mm256_mul_pd(velec,rinvsq21);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq22,rinv22);
 +            felec            = _mm256_mul_pd(velec,rinvsq22);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
 +
 +            /* Inner loop uses 278 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                              &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx01             = _mm256_sub_pd(ix0,jx1);
 +            dy01             = _mm256_sub_pd(iy0,jy1);
 +            dz01             = _mm256_sub_pd(iz0,jz1);
 +            dx02             = _mm256_sub_pd(ix0,jx2);
 +            dy02             = _mm256_sub_pd(iy0,jy2);
 +            dz02             = _mm256_sub_pd(iz0,jz2);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
 +            rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv01           = gmx_mm256_invsqrt_pd(rsq01);
 +            rinv02           = gmx_mm256_invsqrt_pd(rsq02);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq01         = _mm256_mul_pd(rinv01,rinv01);
 +            rinvsq02         = _mm256_mul_pd(rinv02,rinv02);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,3);
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq00,rinv00);
 +            felec            = _mm256_mul_pd(velec,rinvsq00);
 +
 +            /* CUBIC SPLINE TABLE DISPERSION */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw6            = _mm256_mul_pd(c6_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw6            = _mm256_mul_pd(c6_00,FF);
 +
 +            /* CUBIC SPLINE TABLE REPULSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw12           = _mm256_mul_pd(c12_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw12           = _mm256_mul_pd(c12_00,FF);
 +            vvdw             = _mm256_add_pd(vvdw12,vvdw6);
 +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +            vvdw             = _mm256_andnot_pd(dummy_mask,vvdw);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq01,rinv01);
 +            felec            = _mm256_mul_pd(velec,rinvsq01);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx01);
 +            ty               = _mm256_mul_pd(fscal,dy01);
 +            tz               = _mm256_mul_pd(fscal,dz01);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq02,rinv02);
 +            felec            = _mm256_mul_pd(velec,rinvsq02);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx02);
 +            ty               = _mm256_mul_pd(fscal,dy02);
 +            tz               = _mm256_mul_pd(fscal,dz02);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq10,rinv10);
 +            felec            = _mm256_mul_pd(velec,rinvsq10);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq11,rinv11);
 +            felec            = _mm256_mul_pd(velec,rinvsq11);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq12,rinv12);
 +            felec            = _mm256_mul_pd(velec,rinvsq12);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq20,rinv20);
 +            felec            = _mm256_mul_pd(velec,rinvsq20);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq21,rinv21);
 +            felec            = _mm256_mul_pd(velec,rinvsq21);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq22,rinv22);
 +            felec            = _mm256_mul_pd(velec,rinvsq22);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
 +
 +            /* Inner loop uses 279 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
 +        gmx_mm256_update_1pot_pd(vvdwsum,kernel_data->energygrp_vdw+ggid);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 20 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3W3_VF,outeriter*20 + inneriter*279);
 +}
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecCoul_VdwCSTab_GeomW3W3_F_avx_256_double
 + * Electrostatics interaction: Coulomb
 + * VdW interaction:            CubicSplineTable
 + * Geometry:                   Water3-Water3
 + * Calculate force/pot:        Force
 + */
 +void
 +nb_kernel_ElecCoul_VdwCSTab_GeomW3W3_F_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
 +    __m256d          jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
 +    int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
 +    __m256d          jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
 +    __m256d          dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
 +    __m256d          dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
 +    __m256d          dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m128i          vfitab;
 +    __m128i          ifour       = _mm_set1_epi32(4);
 +    __m256d          rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
 +    real             *vftab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    vftab            = kernel_data->table_vdw->data;
 +    vftabscale       = _mm256_set1_pd(kernel_data->table_vdw->scale);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +    jq0              = _mm256_set1_pd(charge[inr+0]);
 +    jq1              = _mm256_set1_pd(charge[inr+1]);
 +    jq2              = _mm256_set1_pd(charge[inr+2]);
 +    vdwjidx0A        = 2*vdwtype[inr+0];
 +    qq00             = _mm256_mul_pd(iq0,jq0);
 +    c6_00            = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A]);
 +    c12_00           = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A+1]);
 +    qq01             = _mm256_mul_pd(iq0,jq1);
 +    qq02             = _mm256_mul_pd(iq0,jq2);
 +    qq10             = _mm256_mul_pd(iq1,jq0);
 +    qq11             = _mm256_mul_pd(iq1,jq1);
 +    qq12             = _mm256_mul_pd(iq1,jq2);
 +    qq20             = _mm256_mul_pd(iq2,jq0);
 +    qq21             = _mm256_mul_pd(iq2,jq1);
 +    qq22             = _mm256_mul_pd(iq2,jq2);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                              &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx01             = _mm256_sub_pd(ix0,jx1);
 +            dy01             = _mm256_sub_pd(iy0,jy1);
 +            dz01             = _mm256_sub_pd(iz0,jz1);
 +            dx02             = _mm256_sub_pd(ix0,jx2);
 +            dy02             = _mm256_sub_pd(iy0,jy2);
 +            dz02             = _mm256_sub_pd(iz0,jz2);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
 +            rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv01           = gmx_mm256_invsqrt_pd(rsq01);
 +            rinv02           = gmx_mm256_invsqrt_pd(rsq02);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq01         = _mm256_mul_pd(rinv01,rinv01);
 +            rinvsq02         = _mm256_mul_pd(rinv02,rinv02);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,3);
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq00,rinv00);
 +            felec            = _mm256_mul_pd(velec,rinvsq00);
 +
 +            /* CUBIC SPLINE TABLE DISPERSION */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw6            = _mm256_mul_pd(c6_00,FF);
 +
 +            /* CUBIC SPLINE TABLE REPULSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw12           = _mm256_mul_pd(c12_00,FF);
 +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq01,rinv01);
 +            felec            = _mm256_mul_pd(velec,rinvsq01);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx01);
 +            ty               = _mm256_mul_pd(fscal,dy01);
 +            tz               = _mm256_mul_pd(fscal,dz01);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq02,rinv02);
 +            felec            = _mm256_mul_pd(velec,rinvsq02);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx02);
 +            ty               = _mm256_mul_pd(fscal,dy02);
 +            tz               = _mm256_mul_pd(fscal,dz02);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq10,rinv10);
 +            felec            = _mm256_mul_pd(velec,rinvsq10);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq11,rinv11);
 +            felec            = _mm256_mul_pd(velec,rinvsq11);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq12,rinv12);
 +            felec            = _mm256_mul_pd(velec,rinvsq12);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq20,rinv20);
 +            felec            = _mm256_mul_pd(velec,rinvsq20);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq21,rinv21);
 +            felec            = _mm256_mul_pd(velec,rinvsq21);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq22,rinv22);
 +            felec            = _mm256_mul_pd(velec,rinvsq22);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
 +
 +            /* Inner loop uses 261 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                              &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx01             = _mm256_sub_pd(ix0,jx1);
 +            dy01             = _mm256_sub_pd(iy0,jy1);
 +            dz01             = _mm256_sub_pd(iz0,jz1);
 +            dx02             = _mm256_sub_pd(ix0,jx2);
 +            dy02             = _mm256_sub_pd(iy0,jy2);
 +            dz02             = _mm256_sub_pd(iz0,jz2);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
 +            rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv01           = gmx_mm256_invsqrt_pd(rsq01);
 +            rinv02           = gmx_mm256_invsqrt_pd(rsq02);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq01         = _mm256_mul_pd(rinv01,rinv01);
 +            rinvsq02         = _mm256_mul_pd(rinv02,rinv02);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,3);
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq00,rinv00);
 +            felec            = _mm256_mul_pd(velec,rinvsq00);
 +
 +            /* CUBIC SPLINE TABLE DISPERSION */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw6            = _mm256_mul_pd(c6_00,FF);
 +
 +            /* CUBIC SPLINE TABLE REPULSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw12           = _mm256_mul_pd(c12_00,FF);
 +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq01,rinv01);
 +            felec            = _mm256_mul_pd(velec,rinvsq01);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx01);
 +            ty               = _mm256_mul_pd(fscal,dy01);
 +            tz               = _mm256_mul_pd(fscal,dz01);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq02,rinv02);
 +            felec            = _mm256_mul_pd(velec,rinvsq02);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx02);
 +            ty               = _mm256_mul_pd(fscal,dy02);
 +            tz               = _mm256_mul_pd(fscal,dz02);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq10,rinv10);
 +            felec            = _mm256_mul_pd(velec,rinvsq10);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq11,rinv11);
 +            felec            = _mm256_mul_pd(velec,rinvsq11);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq12,rinv12);
 +            felec            = _mm256_mul_pd(velec,rinvsq12);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq20,rinv20);
 +            felec            = _mm256_mul_pd(velec,rinvsq20);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq21,rinv21);
 +            felec            = _mm256_mul_pd(velec,rinvsq21);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq22,rinv22);
 +            felec            = _mm256_mul_pd(velec,rinvsq22);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
 +
 +            /* Inner loop uses 262 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 18 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3W3_F,outeriter*18 + inneriter*262);
 +}
index f4d7f9857421c6154e3d2f925270bd10cfa2d8b9,0000000000000000000000000000000000000000..245acdafc588ddc42439d2f50e2eb8e4a9ef8963
mode 100644,000000..100644
--- /dev/null
@@@ -1,1278 -1,0 +1,1278 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/*
 + * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2001-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the Free
 + * Software Foundation; either version 2 of the License, or (at your option) any
 + * later version.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website.
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecCoul_VdwCSTab_GeomW4P1_VF_avx_256_double
 + * Electrostatics interaction: Coulomb
 + * VdW interaction:            CubicSplineTable
 + * Geometry:                   Water4-Particle
 + * Calculate force/pot:        PotentialAndForce
 + */
 +void
 +nb_kernel_ElecCoul_VdwCSTab_GeomW4P1_VF_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    real *           vdwioffsetptr3;
 +    __m256d          ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          dx30,dy30,dz30,rsq30,rinv30,rinvsq30,r30,qq30,c6_30,c12_30;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m128i          vfitab;
 +    __m128i          ifour       = _mm_set1_epi32(4);
 +    __m256d          rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
 +    real             *vftab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    vftab            = kernel_data->table_vdw->data;
 +    vftabscale       = _mm256_set1_pd(kernel_data->table_vdw->scale);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    iq3              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+3]));
 +    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_4rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +        fix3             = _mm256_setzero_pd();
 +        fiy3             = _mm256_setzero_pd();
 +        fiz3             = _mm256_setzero_pd();
 +
 +        /* Reset potential sums */
 +        velecsum         = _mm256_setzero_pd();
 +        vvdwsum          = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx30             = _mm256_sub_pd(ix3,jx0);
 +            dy30             = _mm256_sub_pd(iy3,jy0);
 +            dz30             = _mm256_sub_pd(iz3,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq30            = gmx_mm256_calc_rsq_pd(dx30,dy30,dz30);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv30           = gmx_mm256_invsqrt_pd(rsq30);
 +
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq30         = _mm256_mul_pd(rinv30,rinv30);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,3);
 +
 +            /* CUBIC SPLINE TABLE DISPERSION */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw6            = _mm256_mul_pd(c6_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw6            = _mm256_mul_pd(c6_00,FF);
 +
 +            /* CUBIC SPLINE TABLE REPULSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw12           = _mm256_mul_pd(c12_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw12           = _mm256_mul_pd(c12_00,FF);
 +            vvdw             = _mm256_add_pd(vvdw12,vvdw6);
 +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = fvdw;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq10,rinv10);
 +            felec            = _mm256_mul_pd(velec,rinvsq10);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq20,rinv20);
 +            felec            = _mm256_mul_pd(velec,rinvsq20);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq30             = _mm256_mul_pd(iq3,jq0);
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq30,rinv30);
 +            felec            = _mm256_mul_pd(velec,rinvsq30);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx30);
 +            ty               = _mm256_mul_pd(fscal,dy30);
 +            tz               = _mm256_mul_pd(fscal,dz30);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 140 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx30             = _mm256_sub_pd(ix3,jx0);
 +            dy30             = _mm256_sub_pd(iy3,jy0);
 +            dz30             = _mm256_sub_pd(iz3,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq30            = gmx_mm256_calc_rsq_pd(dx30,dy30,dz30);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv30           = gmx_mm256_invsqrt_pd(rsq30);
 +
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq30         = _mm256_mul_pd(rinv30,rinv30);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,3);
 +
 +            /* CUBIC SPLINE TABLE DISPERSION */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw6            = _mm256_mul_pd(c6_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw6            = _mm256_mul_pd(c6_00,FF);
 +
 +            /* CUBIC SPLINE TABLE REPULSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw12           = _mm256_mul_pd(c12_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw12           = _mm256_mul_pd(c12_00,FF);
 +            vvdw             = _mm256_add_pd(vvdw12,vvdw6);
 +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            vvdw             = _mm256_andnot_pd(dummy_mask,vvdw);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = fvdw;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq10,rinv10);
 +            felec            = _mm256_mul_pd(velec,rinvsq10);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq20,rinv20);
 +            felec            = _mm256_mul_pd(velec,rinvsq20);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq30             = _mm256_mul_pd(iq3,jq0);
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq30,rinv30);
 +            felec            = _mm256_mul_pd(velec,rinvsq30);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx30);
 +            ty               = _mm256_mul_pd(fscal,dy30);
 +            tz               = _mm256_mul_pd(fscal,dz30);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 141 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_4atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
 +        gmx_mm256_update_1pot_pd(vvdwsum,kernel_data->energygrp_vdw+ggid);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 26 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4_VF,outeriter*26 + inneriter*141);
 +}
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecCoul_VdwCSTab_GeomW4P1_F_avx_256_double
 + * Electrostatics interaction: Coulomb
 + * VdW interaction:            CubicSplineTable
 + * Geometry:                   Water4-Particle
 + * Calculate force/pot:        Force
 + */
 +void
 +nb_kernel_ElecCoul_VdwCSTab_GeomW4P1_F_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    real *           vdwioffsetptr3;
 +    __m256d          ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          dx30,dy30,dz30,rsq30,rinv30,rinvsq30,r30,qq30,c6_30,c12_30;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m128i          vfitab;
 +    __m128i          ifour       = _mm_set1_epi32(4);
 +    __m256d          rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
 +    real             *vftab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    vftab            = kernel_data->table_vdw->data;
 +    vftabscale       = _mm256_set1_pd(kernel_data->table_vdw->scale);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    iq3              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+3]));
 +    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_4rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +        fix3             = _mm256_setzero_pd();
 +        fiy3             = _mm256_setzero_pd();
 +        fiz3             = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx30             = _mm256_sub_pd(ix3,jx0);
 +            dy30             = _mm256_sub_pd(iy3,jy0);
 +            dz30             = _mm256_sub_pd(iz3,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq30            = gmx_mm256_calc_rsq_pd(dx30,dy30,dz30);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv30           = gmx_mm256_invsqrt_pd(rsq30);
 +
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq30         = _mm256_mul_pd(rinv30,rinv30);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,3);
 +
 +            /* CUBIC SPLINE TABLE DISPERSION */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw6            = _mm256_mul_pd(c6_00,FF);
 +
 +            /* CUBIC SPLINE TABLE REPULSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw12           = _mm256_mul_pd(c12_00,FF);
 +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            fscal            = fvdw;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq10,rinv10);
 +            felec            = _mm256_mul_pd(velec,rinvsq10);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq20,rinv20);
 +            felec            = _mm256_mul_pd(velec,rinvsq20);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq30             = _mm256_mul_pd(iq3,jq0);
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq30,rinv30);
 +            felec            = _mm256_mul_pd(velec,rinvsq30);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx30);
 +            ty               = _mm256_mul_pd(fscal,dy30);
 +            tz               = _mm256_mul_pd(fscal,dz30);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 129 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx30             = _mm256_sub_pd(ix3,jx0);
 +            dy30             = _mm256_sub_pd(iy3,jy0);
 +            dz30             = _mm256_sub_pd(iz3,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq30            = gmx_mm256_calc_rsq_pd(dx30,dy30,dz30);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv30           = gmx_mm256_invsqrt_pd(rsq30);
 +
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq30         = _mm256_mul_pd(rinv30,rinv30);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,3);
 +
 +            /* CUBIC SPLINE TABLE DISPERSION */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw6            = _mm256_mul_pd(c6_00,FF);
 +
 +            /* CUBIC SPLINE TABLE REPULSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw12           = _mm256_mul_pd(c12_00,FF);
 +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            fscal            = fvdw;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq10,rinv10);
 +            felec            = _mm256_mul_pd(velec,rinvsq10);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq20,rinv20);
 +            felec            = _mm256_mul_pd(velec,rinvsq20);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq30             = _mm256_mul_pd(iq3,jq0);
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq30,rinv30);
 +            felec            = _mm256_mul_pd(velec,rinvsq30);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx30);
 +            ty               = _mm256_mul_pd(fscal,dy30);
 +            tz               = _mm256_mul_pd(fscal,dz30);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 130 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_4atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 24 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4_F,outeriter*24 + inneriter*130);
 +}
index 8266aa2222b7373f7794596c8c113175ca811c9a,0000000000000000000000000000000000000000..746f41666a0926be9b05600deb2dd07f5258841c
mode 100644,000000..100644
--- /dev/null
@@@ -1,2072 -1,0 +1,2072 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/*
 + * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2001-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the Free
 + * Software Foundation; either version 2 of the License, or (at your option) any
 + * later version.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website.
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecCoul_VdwCSTab_GeomW4W4_VF_avx_256_double
 + * Electrostatics interaction: Coulomb
 + * VdW interaction:            CubicSplineTable
 + * Geometry:                   Water4-Water4
 + * Calculate force/pot:        PotentialAndForce
 + */
 +void
 +nb_kernel_ElecCoul_VdwCSTab_GeomW4W4_VF_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    real *           vdwioffsetptr3;
 +    __m256d          ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
 +    __m256d          jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
 +    int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
 +    __m256d          jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
 +    int              vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D;
 +    __m256d          jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
 +    __m256d          dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
 +    __m256d          dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
 +    __m256d          dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
 +    __m256d          dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
 +    __m256d          dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
 +    __m256d          dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
 +    __m256d          dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
 +    __m256d          dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m128i          vfitab;
 +    __m128i          ifour       = _mm_set1_epi32(4);
 +    __m256d          rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
 +    real             *vftab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    vftab            = kernel_data->table_vdw->data;
 +    vftabscale       = _mm256_set1_pd(kernel_data->table_vdw->scale);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    iq3              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+3]));
 +    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +    jq1              = _mm256_set1_pd(charge[inr+1]);
 +    jq2              = _mm256_set1_pd(charge[inr+2]);
 +    jq3              = _mm256_set1_pd(charge[inr+3]);
 +    vdwjidx0A        = 2*vdwtype[inr+0];
 +    c6_00            = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A]);
 +    c12_00           = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A+1]);
 +    qq11             = _mm256_mul_pd(iq1,jq1);
 +    qq12             = _mm256_mul_pd(iq1,jq2);
 +    qq13             = _mm256_mul_pd(iq1,jq3);
 +    qq21             = _mm256_mul_pd(iq2,jq1);
 +    qq22             = _mm256_mul_pd(iq2,jq2);
 +    qq23             = _mm256_mul_pd(iq2,jq3);
 +    qq31             = _mm256_mul_pd(iq3,jq1);
 +    qq32             = _mm256_mul_pd(iq3,jq2);
 +    qq33             = _mm256_mul_pd(iq3,jq3);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_4rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +        fix3             = _mm256_setzero_pd();
 +        fiy3             = _mm256_setzero_pd();
 +        fiz3             = _mm256_setzero_pd();
 +
 +        /* Reset potential sums */
 +        velecsum         = _mm256_setzero_pd();
 +        vvdwsum          = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_4rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
 +                                                 &jy2,&jz2,&jx3,&jy3,&jz3);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx13             = _mm256_sub_pd(ix1,jx3);
 +            dy13             = _mm256_sub_pd(iy1,jy3);
 +            dz13             = _mm256_sub_pd(iz1,jz3);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +            dx23             = _mm256_sub_pd(ix2,jx3);
 +            dy23             = _mm256_sub_pd(iy2,jy3);
 +            dz23             = _mm256_sub_pd(iz2,jz3);
 +            dx31             = _mm256_sub_pd(ix3,jx1);
 +            dy31             = _mm256_sub_pd(iy3,jy1);
 +            dz31             = _mm256_sub_pd(iz3,jz1);
 +            dx32             = _mm256_sub_pd(ix3,jx2);
 +            dy32             = _mm256_sub_pd(iy3,jy2);
 +            dz32             = _mm256_sub_pd(iz3,jz2);
 +            dx33             = _mm256_sub_pd(ix3,jx3);
 +            dy33             = _mm256_sub_pd(iy3,jy3);
 +            dz33             = _mm256_sub_pd(iz3,jz3);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq13            = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +            rsq23            = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
 +            rsq31            = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
 +            rsq32            = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
 +            rsq33            = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv13           = gmx_mm256_invsqrt_pd(rsq13);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +            rinv23           = gmx_mm256_invsqrt_pd(rsq23);
 +            rinv31           = gmx_mm256_invsqrt_pd(rsq31);
 +            rinv32           = gmx_mm256_invsqrt_pd(rsq32);
 +            rinv33           = gmx_mm256_invsqrt_pd(rsq33);
 +
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq13         = _mm256_mul_pd(rinv13,rinv13);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +            rinvsq23         = _mm256_mul_pd(rinv23,rinv23);
 +            rinvsq31         = _mm256_mul_pd(rinv31,rinv31);
 +            rinvsq32         = _mm256_mul_pd(rinv32,rinv32);
 +            rinvsq33         = _mm256_mul_pd(rinv33,rinv33);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +            fjx3             = _mm256_setzero_pd();
 +            fjy3             = _mm256_setzero_pd();
 +            fjz3             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,3);
 +
 +            /* CUBIC SPLINE TABLE DISPERSION */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw6            = _mm256_mul_pd(c6_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw6            = _mm256_mul_pd(c6_00,FF);
 +
 +            /* CUBIC SPLINE TABLE REPULSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw12           = _mm256_mul_pd(c12_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw12           = _mm256_mul_pd(c12_00,FF);
 +            vvdw             = _mm256_add_pd(vvdw12,vvdw6);
 +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = fvdw;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq11,rinv11);
 +            felec            = _mm256_mul_pd(velec,rinvsq11);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq12,rinv12);
 +            felec            = _mm256_mul_pd(velec,rinvsq12);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq13,rinv13);
 +            felec            = _mm256_mul_pd(velec,rinvsq13);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx13);
 +            ty               = _mm256_mul_pd(fscal,dy13);
 +            tz               = _mm256_mul_pd(fscal,dz13);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq21,rinv21);
 +            felec            = _mm256_mul_pd(velec,rinvsq21);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq22,rinv22);
 +            felec            = _mm256_mul_pd(velec,rinvsq22);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq23,rinv23);
 +            felec            = _mm256_mul_pd(velec,rinvsq23);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx23);
 +            ty               = _mm256_mul_pd(fscal,dy23);
 +            tz               = _mm256_mul_pd(fscal,dz23);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq31,rinv31);
 +            felec            = _mm256_mul_pd(velec,rinvsq31);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx31);
 +            ty               = _mm256_mul_pd(fscal,dy31);
 +            tz               = _mm256_mul_pd(fscal,dz31);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq32,rinv32);
 +            felec            = _mm256_mul_pd(velec,rinvsq32);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx32);
 +            ty               = _mm256_mul_pd(fscal,dy32);
 +            tz               = _mm256_mul_pd(fscal,dz32);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq33,rinv33);
 +            felec            = _mm256_mul_pd(velec,rinvsq33);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx33);
 +            ty               = _mm256_mul_pd(fscal,dy33);
 +            tz               = _mm256_mul_pd(fscal,dz33);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_4rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
 +                                                      fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
 +
 +            /* Inner loop uses 302 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_4rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
 +                                                 &jy2,&jz2,&jx3,&jy3,&jz3);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx13             = _mm256_sub_pd(ix1,jx3);
 +            dy13             = _mm256_sub_pd(iy1,jy3);
 +            dz13             = _mm256_sub_pd(iz1,jz3);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +            dx23             = _mm256_sub_pd(ix2,jx3);
 +            dy23             = _mm256_sub_pd(iy2,jy3);
 +            dz23             = _mm256_sub_pd(iz2,jz3);
 +            dx31             = _mm256_sub_pd(ix3,jx1);
 +            dy31             = _mm256_sub_pd(iy3,jy1);
 +            dz31             = _mm256_sub_pd(iz3,jz1);
 +            dx32             = _mm256_sub_pd(ix3,jx2);
 +            dy32             = _mm256_sub_pd(iy3,jy2);
 +            dz32             = _mm256_sub_pd(iz3,jz2);
 +            dx33             = _mm256_sub_pd(ix3,jx3);
 +            dy33             = _mm256_sub_pd(iy3,jy3);
 +            dz33             = _mm256_sub_pd(iz3,jz3);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq13            = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +            rsq23            = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
 +            rsq31            = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
 +            rsq32            = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
 +            rsq33            = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv13           = gmx_mm256_invsqrt_pd(rsq13);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +            rinv23           = gmx_mm256_invsqrt_pd(rsq23);
 +            rinv31           = gmx_mm256_invsqrt_pd(rsq31);
 +            rinv32           = gmx_mm256_invsqrt_pd(rsq32);
 +            rinv33           = gmx_mm256_invsqrt_pd(rsq33);
 +
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq13         = _mm256_mul_pd(rinv13,rinv13);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +            rinvsq23         = _mm256_mul_pd(rinv23,rinv23);
 +            rinvsq31         = _mm256_mul_pd(rinv31,rinv31);
 +            rinvsq32         = _mm256_mul_pd(rinv32,rinv32);
 +            rinvsq33         = _mm256_mul_pd(rinv33,rinv33);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +            fjx3             = _mm256_setzero_pd();
 +            fjy3             = _mm256_setzero_pd();
 +            fjz3             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,3);
 +
 +            /* CUBIC SPLINE TABLE DISPERSION */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw6            = _mm256_mul_pd(c6_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw6            = _mm256_mul_pd(c6_00,FF);
 +
 +            /* CUBIC SPLINE TABLE REPULSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw12           = _mm256_mul_pd(c12_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw12           = _mm256_mul_pd(c12_00,FF);
 +            vvdw             = _mm256_add_pd(vvdw12,vvdw6);
 +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            vvdw             = _mm256_andnot_pd(dummy_mask,vvdw);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = fvdw;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq11,rinv11);
 +            felec            = _mm256_mul_pd(velec,rinvsq11);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq12,rinv12);
 +            felec            = _mm256_mul_pd(velec,rinvsq12);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq13,rinv13);
 +            felec            = _mm256_mul_pd(velec,rinvsq13);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx13);
 +            ty               = _mm256_mul_pd(fscal,dy13);
 +            tz               = _mm256_mul_pd(fscal,dz13);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq21,rinv21);
 +            felec            = _mm256_mul_pd(velec,rinvsq21);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq22,rinv22);
 +            felec            = _mm256_mul_pd(velec,rinvsq22);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq23,rinv23);
 +            felec            = _mm256_mul_pd(velec,rinvsq23);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx23);
 +            ty               = _mm256_mul_pd(fscal,dy23);
 +            tz               = _mm256_mul_pd(fscal,dz23);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq31,rinv31);
 +            felec            = _mm256_mul_pd(velec,rinvsq31);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx31);
 +            ty               = _mm256_mul_pd(fscal,dy31);
 +            tz               = _mm256_mul_pd(fscal,dz31);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq32,rinv32);
 +            felec            = _mm256_mul_pd(velec,rinvsq32);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx32);
 +            ty               = _mm256_mul_pd(fscal,dy32);
 +            tz               = _mm256_mul_pd(fscal,dz32);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq33,rinv33);
 +            felec            = _mm256_mul_pd(velec,rinvsq33);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx33);
 +            ty               = _mm256_mul_pd(fscal,dy33);
 +            tz               = _mm256_mul_pd(fscal,dz33);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_4rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
 +                                                      fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
 +
 +            /* Inner loop uses 303 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_4atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
 +        gmx_mm256_update_1pot_pd(vvdwsum,kernel_data->energygrp_vdw+ggid);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 26 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4W4_VF,outeriter*26 + inneriter*303);
 +}
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecCoul_VdwCSTab_GeomW4W4_F_avx_256_double
 + * Electrostatics interaction: Coulomb
 + * VdW interaction:            CubicSplineTable
 + * Geometry:                   Water4-Water4
 + * Calculate force/pot:        Force
 + */
 +void
 +nb_kernel_ElecCoul_VdwCSTab_GeomW4W4_F_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    real *           vdwioffsetptr3;
 +    __m256d          ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
 +    __m256d          jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
 +    int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
 +    __m256d          jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
 +    int              vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D;
 +    __m256d          jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
 +    __m256d          dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
 +    __m256d          dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
 +    __m256d          dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
 +    __m256d          dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
 +    __m256d          dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
 +    __m256d          dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
 +    __m256d          dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
 +    __m256d          dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m128i          vfitab;
 +    __m128i          ifour       = _mm_set1_epi32(4);
 +    __m256d          rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
 +    real             *vftab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    vftab            = kernel_data->table_vdw->data;
 +    vftabscale       = _mm256_set1_pd(kernel_data->table_vdw->scale);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    iq3              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+3]));
 +    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +    jq1              = _mm256_set1_pd(charge[inr+1]);
 +    jq2              = _mm256_set1_pd(charge[inr+2]);
 +    jq3              = _mm256_set1_pd(charge[inr+3]);
 +    vdwjidx0A        = 2*vdwtype[inr+0];
 +    c6_00            = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A]);
 +    c12_00           = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A+1]);
 +    qq11             = _mm256_mul_pd(iq1,jq1);
 +    qq12             = _mm256_mul_pd(iq1,jq2);
 +    qq13             = _mm256_mul_pd(iq1,jq3);
 +    qq21             = _mm256_mul_pd(iq2,jq1);
 +    qq22             = _mm256_mul_pd(iq2,jq2);
 +    qq23             = _mm256_mul_pd(iq2,jq3);
 +    qq31             = _mm256_mul_pd(iq3,jq1);
 +    qq32             = _mm256_mul_pd(iq3,jq2);
 +    qq33             = _mm256_mul_pd(iq3,jq3);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_4rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +        fix3             = _mm256_setzero_pd();
 +        fiy3             = _mm256_setzero_pd();
 +        fiz3             = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_4rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
 +                                                 &jy2,&jz2,&jx3,&jy3,&jz3);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx13             = _mm256_sub_pd(ix1,jx3);
 +            dy13             = _mm256_sub_pd(iy1,jy3);
 +            dz13             = _mm256_sub_pd(iz1,jz3);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +            dx23             = _mm256_sub_pd(ix2,jx3);
 +            dy23             = _mm256_sub_pd(iy2,jy3);
 +            dz23             = _mm256_sub_pd(iz2,jz3);
 +            dx31             = _mm256_sub_pd(ix3,jx1);
 +            dy31             = _mm256_sub_pd(iy3,jy1);
 +            dz31             = _mm256_sub_pd(iz3,jz1);
 +            dx32             = _mm256_sub_pd(ix3,jx2);
 +            dy32             = _mm256_sub_pd(iy3,jy2);
 +            dz32             = _mm256_sub_pd(iz3,jz2);
 +            dx33             = _mm256_sub_pd(ix3,jx3);
 +            dy33             = _mm256_sub_pd(iy3,jy3);
 +            dz33             = _mm256_sub_pd(iz3,jz3);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq13            = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +            rsq23            = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
 +            rsq31            = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
 +            rsq32            = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
 +            rsq33            = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv13           = gmx_mm256_invsqrt_pd(rsq13);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +            rinv23           = gmx_mm256_invsqrt_pd(rsq23);
 +            rinv31           = gmx_mm256_invsqrt_pd(rsq31);
 +            rinv32           = gmx_mm256_invsqrt_pd(rsq32);
 +            rinv33           = gmx_mm256_invsqrt_pd(rsq33);
 +
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq13         = _mm256_mul_pd(rinv13,rinv13);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +            rinvsq23         = _mm256_mul_pd(rinv23,rinv23);
 +            rinvsq31         = _mm256_mul_pd(rinv31,rinv31);
 +            rinvsq32         = _mm256_mul_pd(rinv32,rinv32);
 +            rinvsq33         = _mm256_mul_pd(rinv33,rinv33);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +            fjx3             = _mm256_setzero_pd();
 +            fjy3             = _mm256_setzero_pd();
 +            fjz3             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,3);
 +
 +            /* CUBIC SPLINE TABLE DISPERSION */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw6            = _mm256_mul_pd(c6_00,FF);
 +
 +            /* CUBIC SPLINE TABLE REPULSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw12           = _mm256_mul_pd(c12_00,FF);
 +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            fscal            = fvdw;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq11,rinv11);
 +            felec            = _mm256_mul_pd(velec,rinvsq11);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq12,rinv12);
 +            felec            = _mm256_mul_pd(velec,rinvsq12);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq13,rinv13);
 +            felec            = _mm256_mul_pd(velec,rinvsq13);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx13);
 +            ty               = _mm256_mul_pd(fscal,dy13);
 +            tz               = _mm256_mul_pd(fscal,dz13);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq21,rinv21);
 +            felec            = _mm256_mul_pd(velec,rinvsq21);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq22,rinv22);
 +            felec            = _mm256_mul_pd(velec,rinvsq22);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq23,rinv23);
 +            felec            = _mm256_mul_pd(velec,rinvsq23);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx23);
 +            ty               = _mm256_mul_pd(fscal,dy23);
 +            tz               = _mm256_mul_pd(fscal,dz23);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq31,rinv31);
 +            felec            = _mm256_mul_pd(velec,rinvsq31);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx31);
 +            ty               = _mm256_mul_pd(fscal,dy31);
 +            tz               = _mm256_mul_pd(fscal,dz31);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq32,rinv32);
 +            felec            = _mm256_mul_pd(velec,rinvsq32);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx32);
 +            ty               = _mm256_mul_pd(fscal,dy32);
 +            tz               = _mm256_mul_pd(fscal,dz32);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq33,rinv33);
 +            felec            = _mm256_mul_pd(velec,rinvsq33);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx33);
 +            ty               = _mm256_mul_pd(fscal,dy33);
 +            tz               = _mm256_mul_pd(fscal,dz33);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_4rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
 +                                                      fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
 +
 +            /* Inner loop uses 285 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_4rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
 +                                                 &jy2,&jz2,&jx3,&jy3,&jz3);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx13             = _mm256_sub_pd(ix1,jx3);
 +            dy13             = _mm256_sub_pd(iy1,jy3);
 +            dz13             = _mm256_sub_pd(iz1,jz3);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +            dx23             = _mm256_sub_pd(ix2,jx3);
 +            dy23             = _mm256_sub_pd(iy2,jy3);
 +            dz23             = _mm256_sub_pd(iz2,jz3);
 +            dx31             = _mm256_sub_pd(ix3,jx1);
 +            dy31             = _mm256_sub_pd(iy3,jy1);
 +            dz31             = _mm256_sub_pd(iz3,jz1);
 +            dx32             = _mm256_sub_pd(ix3,jx2);
 +            dy32             = _mm256_sub_pd(iy3,jy2);
 +            dz32             = _mm256_sub_pd(iz3,jz2);
 +            dx33             = _mm256_sub_pd(ix3,jx3);
 +            dy33             = _mm256_sub_pd(iy3,jy3);
 +            dz33             = _mm256_sub_pd(iz3,jz3);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq13            = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +            rsq23            = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
 +            rsq31            = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
 +            rsq32            = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
 +            rsq33            = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv13           = gmx_mm256_invsqrt_pd(rsq13);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +            rinv23           = gmx_mm256_invsqrt_pd(rsq23);
 +            rinv31           = gmx_mm256_invsqrt_pd(rsq31);
 +            rinv32           = gmx_mm256_invsqrt_pd(rsq32);
 +            rinv33           = gmx_mm256_invsqrt_pd(rsq33);
 +
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq13         = _mm256_mul_pd(rinv13,rinv13);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +            rinvsq23         = _mm256_mul_pd(rinv23,rinv23);
 +            rinvsq31         = _mm256_mul_pd(rinv31,rinv31);
 +            rinvsq32         = _mm256_mul_pd(rinv32,rinv32);
 +            rinvsq33         = _mm256_mul_pd(rinv33,rinv33);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +            fjx3             = _mm256_setzero_pd();
 +            fjy3             = _mm256_setzero_pd();
 +            fjz3             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,3);
 +
 +            /* CUBIC SPLINE TABLE DISPERSION */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw6            = _mm256_mul_pd(c6_00,FF);
 +
 +            /* CUBIC SPLINE TABLE REPULSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw12           = _mm256_mul_pd(c12_00,FF);
 +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            fscal            = fvdw;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq11,rinv11);
 +            felec            = _mm256_mul_pd(velec,rinvsq11);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq12,rinv12);
 +            felec            = _mm256_mul_pd(velec,rinvsq12);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq13,rinv13);
 +            felec            = _mm256_mul_pd(velec,rinvsq13);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx13);
 +            ty               = _mm256_mul_pd(fscal,dy13);
 +            tz               = _mm256_mul_pd(fscal,dz13);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq21,rinv21);
 +            felec            = _mm256_mul_pd(velec,rinvsq21);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq22,rinv22);
 +            felec            = _mm256_mul_pd(velec,rinvsq22);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq23,rinv23);
 +            felec            = _mm256_mul_pd(velec,rinvsq23);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx23);
 +            ty               = _mm256_mul_pd(fscal,dy23);
 +            tz               = _mm256_mul_pd(fscal,dz23);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq31,rinv31);
 +            felec            = _mm256_mul_pd(velec,rinvsq31);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx31);
 +            ty               = _mm256_mul_pd(fscal,dy31);
 +            tz               = _mm256_mul_pd(fscal,dz31);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq32,rinv32);
 +            felec            = _mm256_mul_pd(velec,rinvsq32);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx32);
 +            ty               = _mm256_mul_pd(fscal,dy32);
 +            tz               = _mm256_mul_pd(fscal,dz32);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq33,rinv33);
 +            felec            = _mm256_mul_pd(velec,rinvsq33);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx33);
 +            ty               = _mm256_mul_pd(fscal,dy33);
 +            tz               = _mm256_mul_pd(fscal,dz33);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_4rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
 +                                                      fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
 +
 +            /* Inner loop uses 286 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_4atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 24 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4W4_F,outeriter*24 + inneriter*286);
 +}
index 6b5b157302968f23c61f1d19fadd7db2ec74bbfd,0000000000000000000000000000000000000000..570d4d09900467ebef2a151f2a10a865600efa8d
mode 100644,000000..100644
--- /dev/null
@@@ -1,668 -1,0 +1,668 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/*
 + * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2001-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the Free
 + * Software Foundation; either version 2 of the License, or (at your option) any
 + * later version.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website.
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecCoul_VdwLJ_GeomP1P1_VF_avx_256_double
 + * Electrostatics interaction: Coulomb
 + * VdW interaction:            LennardJones
 + * Geometry:                   Particle-Particle
 + * Calculate force/pot:        PotentialAndForce
 + */
 +void
 +nb_kernel_ElecCoul_VdwLJ_GeomP1P1_VF_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_1rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +
 +        /* Load parameters for i particles */
 +        iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +        vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +        /* Reset potential sums */
 +        velecsum         = _mm256_setzero_pd();
 +        vvdwsum          = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq00,rinv00);
 +            felec            = _mm256_mul_pd(velec,rinvsq00);
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
 +
 +            /* Inner loop uses 39 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq00,rinv00);
 +            felec            = _mm256_mul_pd(velec,rinvsq00);
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +            vvdw             = _mm256_andnot_pd(dummy_mask,vvdw);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
 +
 +            /* Inner loop uses 39 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_1atom_swizzle_pd(fix0,fiy0,fiz0,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
 +        gmx_mm256_update_1pot_pd(vvdwsum,kernel_data->energygrp_vdw+ggid);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 9 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_VF,outeriter*9 + inneriter*39);
 +}
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecCoul_VdwLJ_GeomP1P1_F_avx_256_double
 + * Electrostatics interaction: Coulomb
 + * VdW interaction:            LennardJones
 + * Geometry:                   Particle-Particle
 + * Calculate force/pot:        Force
 + */
 +void
 +nb_kernel_ElecCoul_VdwLJ_GeomP1P1_F_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_1rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +
 +        /* Load parameters for i particles */
 +        iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +        vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq00,rinv00);
 +            felec            = _mm256_mul_pd(velec,rinvsq00);
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(_mm256_mul_pd(c12_00,rinvsix),c6_00),_mm256_mul_pd(rinvsix,rinvsq00));
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
 +
 +            /* Inner loop uses 33 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq00,rinv00);
 +            felec            = _mm256_mul_pd(velec,rinvsq00);
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(_mm256_mul_pd(c12_00,rinvsix),c6_00),_mm256_mul_pd(rinvsix,rinvsq00));
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
 +
 +            /* Inner loop uses 33 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_1atom_swizzle_pd(fix0,fiy0,fiz0,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 7 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_F,outeriter*7 + inneriter*33);
 +}
index b04ff6f96177d7269edd20fd21a5f7dc435ba3a2,0000000000000000000000000000000000000000..17e34b724ca128caed567f0b7eae58eb4fffafba
mode 100644,000000..100644
--- /dev/null
@@@ -1,1022 -1,0 +1,1022 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/*
 + * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2001-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the Free
 + * Software Foundation; either version 2 of the License, or (at your option) any
 + * later version.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website.
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecCoul_VdwLJ_GeomW3P1_VF_avx_256_double
 + * Electrostatics interaction: Coulomb
 + * VdW interaction:            LennardJones
 + * Geometry:                   Water3-Particle
 + * Calculate force/pot:        PotentialAndForce
 + */
 +void
 +nb_kernel_ElecCoul_VdwLJ_GeomW3P1_VF_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +
 +        /* Reset potential sums */
 +        velecsum         = _mm256_setzero_pd();
 +        vvdwsum          = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq00,rinv00);
 +            felec            = _mm256_mul_pd(velec,rinvsq00);
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq10,rinv10);
 +            felec            = _mm256_mul_pd(velec,rinvsq10);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq20,rinv20);
 +            felec            = _mm256_mul_pd(velec,rinvsq20);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 96 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq00,rinv00);
 +            felec            = _mm256_mul_pd(velec,rinvsq00);
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +            vvdw             = _mm256_andnot_pd(dummy_mask,vvdw);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq10,rinv10);
 +            felec            = _mm256_mul_pd(velec,rinvsq10);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq20,rinv20);
 +            felec            = _mm256_mul_pd(velec,rinvsq20);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 96 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
 +        gmx_mm256_update_1pot_pd(vvdwsum,kernel_data->energygrp_vdw+ggid);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 20 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3_VF,outeriter*20 + inneriter*96);
 +}
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecCoul_VdwLJ_GeomW3P1_F_avx_256_double
 + * Electrostatics interaction: Coulomb
 + * VdW interaction:            LennardJones
 + * Geometry:                   Water3-Particle
 + * Calculate force/pot:        Force
 + */
 +void
 +nb_kernel_ElecCoul_VdwLJ_GeomW3P1_F_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq00,rinv00);
 +            felec            = _mm256_mul_pd(velec,rinvsq00);
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(_mm256_mul_pd(c12_00,rinvsix),c6_00),_mm256_mul_pd(rinvsix,rinvsq00));
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq10,rinv10);
 +            felec            = _mm256_mul_pd(velec,rinvsq10);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq20,rinv20);
 +            felec            = _mm256_mul_pd(velec,rinvsq20);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 88 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq00,rinv00);
 +            felec            = _mm256_mul_pd(velec,rinvsq00);
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(_mm256_mul_pd(c12_00,rinvsix),c6_00),_mm256_mul_pd(rinvsix,rinvsq00));
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq10,rinv10);
 +            felec            = _mm256_mul_pd(velec,rinvsq10);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq20,rinv20);
 +            felec            = _mm256_mul_pd(velec,rinvsq20);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 88 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 18 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3_F,outeriter*18 + inneriter*88);
 +}
index 172dc67d0b3c39eea520e9190e309dbc60fc038a,0000000000000000000000000000000000000000..902e678fb0805fccbd2a1a6282e635606cc0abb6
mode 100644,000000..100644
--- /dev/null
@@@ -1,1800 -1,0 +1,1800 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/*
 + * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2001-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the Free
 + * Software Foundation; either version 2 of the License, or (at your option) any
 + * later version.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website.
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecCoul_VdwLJ_GeomW3W3_VF_avx_256_double
 + * Electrostatics interaction: Coulomb
 + * VdW interaction:            LennardJones
 + * Geometry:                   Water3-Water3
 + * Calculate force/pot:        PotentialAndForce
 + */
 +void
 +nb_kernel_ElecCoul_VdwLJ_GeomW3W3_VF_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
 +    __m256d          jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
 +    int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
 +    __m256d          jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
 +    __m256d          dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
 +    __m256d          dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
 +    __m256d          dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +    jq0              = _mm256_set1_pd(charge[inr+0]);
 +    jq1              = _mm256_set1_pd(charge[inr+1]);
 +    jq2              = _mm256_set1_pd(charge[inr+2]);
 +    vdwjidx0A        = 2*vdwtype[inr+0];
 +    qq00             = _mm256_mul_pd(iq0,jq0);
 +    c6_00            = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A]);
 +    c12_00           = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A+1]);
 +    qq01             = _mm256_mul_pd(iq0,jq1);
 +    qq02             = _mm256_mul_pd(iq0,jq2);
 +    qq10             = _mm256_mul_pd(iq1,jq0);
 +    qq11             = _mm256_mul_pd(iq1,jq1);
 +    qq12             = _mm256_mul_pd(iq1,jq2);
 +    qq20             = _mm256_mul_pd(iq2,jq0);
 +    qq21             = _mm256_mul_pd(iq2,jq1);
 +    qq22             = _mm256_mul_pd(iq2,jq2);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +
 +        /* Reset potential sums */
 +        velecsum         = _mm256_setzero_pd();
 +        vvdwsum          = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                              &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx01             = _mm256_sub_pd(ix0,jx1);
 +            dy01             = _mm256_sub_pd(iy0,jy1);
 +            dz01             = _mm256_sub_pd(iz0,jz1);
 +            dx02             = _mm256_sub_pd(ix0,jx2);
 +            dy02             = _mm256_sub_pd(iy0,jy2);
 +            dz02             = _mm256_sub_pd(iz0,jz2);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
 +            rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv01           = gmx_mm256_invsqrt_pd(rsq01);
 +            rinv02           = gmx_mm256_invsqrt_pd(rsq02);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq01         = _mm256_mul_pd(rinv01,rinv01);
 +            rinvsq02         = _mm256_mul_pd(rinv02,rinv02);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq00,rinv00);
 +            felec            = _mm256_mul_pd(velec,rinvsq00);
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq01,rinv01);
 +            felec            = _mm256_mul_pd(velec,rinvsq01);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx01);
 +            ty               = _mm256_mul_pd(fscal,dy01);
 +            tz               = _mm256_mul_pd(fscal,dz01);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq02,rinv02);
 +            felec            = _mm256_mul_pd(velec,rinvsq02);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx02);
 +            ty               = _mm256_mul_pd(fscal,dy02);
 +            tz               = _mm256_mul_pd(fscal,dz02);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq10,rinv10);
 +            felec            = _mm256_mul_pd(velec,rinvsq10);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq11,rinv11);
 +            felec            = _mm256_mul_pd(velec,rinvsq11);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq12,rinv12);
 +            felec            = _mm256_mul_pd(velec,rinvsq12);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq20,rinv20);
 +            felec            = _mm256_mul_pd(velec,rinvsq20);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq21,rinv21);
 +            felec            = _mm256_mul_pd(velec,rinvsq21);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq22,rinv22);
 +            felec            = _mm256_mul_pd(velec,rinvsq22);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
 +
 +            /* Inner loop uses 255 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                              &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx01             = _mm256_sub_pd(ix0,jx1);
 +            dy01             = _mm256_sub_pd(iy0,jy1);
 +            dz01             = _mm256_sub_pd(iz0,jz1);
 +            dx02             = _mm256_sub_pd(ix0,jx2);
 +            dy02             = _mm256_sub_pd(iy0,jy2);
 +            dz02             = _mm256_sub_pd(iz0,jz2);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
 +            rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv01           = gmx_mm256_invsqrt_pd(rsq01);
 +            rinv02           = gmx_mm256_invsqrt_pd(rsq02);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq01         = _mm256_mul_pd(rinv01,rinv01);
 +            rinvsq02         = _mm256_mul_pd(rinv02,rinv02);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq00,rinv00);
 +            felec            = _mm256_mul_pd(velec,rinvsq00);
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +            vvdw             = _mm256_andnot_pd(dummy_mask,vvdw);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq01,rinv01);
 +            felec            = _mm256_mul_pd(velec,rinvsq01);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx01);
 +            ty               = _mm256_mul_pd(fscal,dy01);
 +            tz               = _mm256_mul_pd(fscal,dz01);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq02,rinv02);
 +            felec            = _mm256_mul_pd(velec,rinvsq02);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx02);
 +            ty               = _mm256_mul_pd(fscal,dy02);
 +            tz               = _mm256_mul_pd(fscal,dz02);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq10,rinv10);
 +            felec            = _mm256_mul_pd(velec,rinvsq10);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq11,rinv11);
 +            felec            = _mm256_mul_pd(velec,rinvsq11);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq12,rinv12);
 +            felec            = _mm256_mul_pd(velec,rinvsq12);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq20,rinv20);
 +            felec            = _mm256_mul_pd(velec,rinvsq20);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq21,rinv21);
 +            felec            = _mm256_mul_pd(velec,rinvsq21);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq22,rinv22);
 +            felec            = _mm256_mul_pd(velec,rinvsq22);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
 +
 +            /* Inner loop uses 255 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
 +        gmx_mm256_update_1pot_pd(vvdwsum,kernel_data->energygrp_vdw+ggid);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 20 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3W3_VF,outeriter*20 + inneriter*255);
 +}
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecCoul_VdwLJ_GeomW3W3_F_avx_256_double
 + * Electrostatics interaction: Coulomb
 + * VdW interaction:            LennardJones
 + * Geometry:                   Water3-Water3
 + * Calculate force/pot:        Force
 + */
 +void
 +nb_kernel_ElecCoul_VdwLJ_GeomW3W3_F_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
 +    __m256d          jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
 +    int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
 +    __m256d          jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
 +    __m256d          dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
 +    __m256d          dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
 +    __m256d          dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +    jq0              = _mm256_set1_pd(charge[inr+0]);
 +    jq1              = _mm256_set1_pd(charge[inr+1]);
 +    jq2              = _mm256_set1_pd(charge[inr+2]);
 +    vdwjidx0A        = 2*vdwtype[inr+0];
 +    qq00             = _mm256_mul_pd(iq0,jq0);
 +    c6_00            = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A]);
 +    c12_00           = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A+1]);
 +    qq01             = _mm256_mul_pd(iq0,jq1);
 +    qq02             = _mm256_mul_pd(iq0,jq2);
 +    qq10             = _mm256_mul_pd(iq1,jq0);
 +    qq11             = _mm256_mul_pd(iq1,jq1);
 +    qq12             = _mm256_mul_pd(iq1,jq2);
 +    qq20             = _mm256_mul_pd(iq2,jq0);
 +    qq21             = _mm256_mul_pd(iq2,jq1);
 +    qq22             = _mm256_mul_pd(iq2,jq2);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                              &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx01             = _mm256_sub_pd(ix0,jx1);
 +            dy01             = _mm256_sub_pd(iy0,jy1);
 +            dz01             = _mm256_sub_pd(iz0,jz1);
 +            dx02             = _mm256_sub_pd(ix0,jx2);
 +            dy02             = _mm256_sub_pd(iy0,jy2);
 +            dz02             = _mm256_sub_pd(iz0,jz2);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
 +            rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv01           = gmx_mm256_invsqrt_pd(rsq01);
 +            rinv02           = gmx_mm256_invsqrt_pd(rsq02);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq01         = _mm256_mul_pd(rinv01,rinv01);
 +            rinvsq02         = _mm256_mul_pd(rinv02,rinv02);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq00,rinv00);
 +            felec            = _mm256_mul_pd(velec,rinvsq00);
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(_mm256_mul_pd(c12_00,rinvsix),c6_00),_mm256_mul_pd(rinvsix,rinvsq00));
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq01,rinv01);
 +            felec            = _mm256_mul_pd(velec,rinvsq01);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx01);
 +            ty               = _mm256_mul_pd(fscal,dy01);
 +            tz               = _mm256_mul_pd(fscal,dz01);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq02,rinv02);
 +            felec            = _mm256_mul_pd(velec,rinvsq02);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx02);
 +            ty               = _mm256_mul_pd(fscal,dy02);
 +            tz               = _mm256_mul_pd(fscal,dz02);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq10,rinv10);
 +            felec            = _mm256_mul_pd(velec,rinvsq10);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq11,rinv11);
 +            felec            = _mm256_mul_pd(velec,rinvsq11);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq12,rinv12);
 +            felec            = _mm256_mul_pd(velec,rinvsq12);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq20,rinv20);
 +            felec            = _mm256_mul_pd(velec,rinvsq20);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq21,rinv21);
 +            felec            = _mm256_mul_pd(velec,rinvsq21);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq22,rinv22);
 +            felec            = _mm256_mul_pd(velec,rinvsq22);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
 +
 +            /* Inner loop uses 241 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                              &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx01             = _mm256_sub_pd(ix0,jx1);
 +            dy01             = _mm256_sub_pd(iy0,jy1);
 +            dz01             = _mm256_sub_pd(iz0,jz1);
 +            dx02             = _mm256_sub_pd(ix0,jx2);
 +            dy02             = _mm256_sub_pd(iy0,jy2);
 +            dz02             = _mm256_sub_pd(iz0,jz2);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
 +            rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv01           = gmx_mm256_invsqrt_pd(rsq01);
 +            rinv02           = gmx_mm256_invsqrt_pd(rsq02);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq01         = _mm256_mul_pd(rinv01,rinv01);
 +            rinvsq02         = _mm256_mul_pd(rinv02,rinv02);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq00,rinv00);
 +            felec            = _mm256_mul_pd(velec,rinvsq00);
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(_mm256_mul_pd(c12_00,rinvsix),c6_00),_mm256_mul_pd(rinvsix,rinvsq00));
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq01,rinv01);
 +            felec            = _mm256_mul_pd(velec,rinvsq01);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx01);
 +            ty               = _mm256_mul_pd(fscal,dy01);
 +            tz               = _mm256_mul_pd(fscal,dz01);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq02,rinv02);
 +            felec            = _mm256_mul_pd(velec,rinvsq02);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx02);
 +            ty               = _mm256_mul_pd(fscal,dy02);
 +            tz               = _mm256_mul_pd(fscal,dz02);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq10,rinv10);
 +            felec            = _mm256_mul_pd(velec,rinvsq10);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq11,rinv11);
 +            felec            = _mm256_mul_pd(velec,rinvsq11);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq12,rinv12);
 +            felec            = _mm256_mul_pd(velec,rinvsq12);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq20,rinv20);
 +            felec            = _mm256_mul_pd(velec,rinvsq20);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq21,rinv21);
 +            felec            = _mm256_mul_pd(velec,rinvsq21);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq22,rinv22);
 +            felec            = _mm256_mul_pd(velec,rinvsq22);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
 +
 +            /* Inner loop uses 241 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 18 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3W3_F,outeriter*18 + inneriter*241);
 +}
index f2cc1ef6c2a9c6a5031e80f789f7b35f80589acc,0000000000000000000000000000000000000000..390301235e7b2d1b804fb8bb75388f0d730d58dc
mode 100644,000000..100644
--- /dev/null
@@@ -1,1150 -1,0 +1,1150 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/*
 + * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2001-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the Free
 + * Software Foundation; either version 2 of the License, or (at your option) any
 + * later version.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website.
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecCoul_VdwLJ_GeomW4P1_VF_avx_256_double
 + * Electrostatics interaction: Coulomb
 + * VdW interaction:            LennardJones
 + * Geometry:                   Water4-Particle
 + * Calculate force/pot:        PotentialAndForce
 + */
 +void
 +nb_kernel_ElecCoul_VdwLJ_GeomW4P1_VF_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    real *           vdwioffsetptr3;
 +    __m256d          ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          dx30,dy30,dz30,rsq30,rinv30,rinvsq30,r30,qq30,c6_30,c12_30;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    iq3              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+3]));
 +    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_4rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +        fix3             = _mm256_setzero_pd();
 +        fiy3             = _mm256_setzero_pd();
 +        fiz3             = _mm256_setzero_pd();
 +
 +        /* Reset potential sums */
 +        velecsum         = _mm256_setzero_pd();
 +        vvdwsum          = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx30             = _mm256_sub_pd(ix3,jx0);
 +            dy30             = _mm256_sub_pd(iy3,jy0);
 +            dz30             = _mm256_sub_pd(iz3,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq30            = gmx_mm256_calc_rsq_pd(dx30,dy30,dz30);
 +
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv30           = gmx_mm256_invsqrt_pd(rsq30);
 +
 +            rinvsq00         = gmx_mm256_inv_pd(rsq00);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq30         = _mm256_mul_pd(rinv30,rinv30);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = fvdw;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq10,rinv10);
 +            felec            = _mm256_mul_pd(velec,rinvsq10);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq20,rinv20);
 +            felec            = _mm256_mul_pd(velec,rinvsq20);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq30             = _mm256_mul_pd(iq3,jq0);
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq30,rinv30);
 +            felec            = _mm256_mul_pd(velec,rinvsq30);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx30);
 +            ty               = _mm256_mul_pd(fscal,dy30);
 +            tz               = _mm256_mul_pd(fscal,dz30);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 116 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx30             = _mm256_sub_pd(ix3,jx0);
 +            dy30             = _mm256_sub_pd(iy3,jy0);
 +            dz30             = _mm256_sub_pd(iz3,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq30            = gmx_mm256_calc_rsq_pd(dx30,dy30,dz30);
 +
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv30           = gmx_mm256_invsqrt_pd(rsq30);
 +
 +            rinvsq00         = gmx_mm256_inv_pd(rsq00);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq30         = _mm256_mul_pd(rinv30,rinv30);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            vvdw             = _mm256_andnot_pd(dummy_mask,vvdw);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = fvdw;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq10,rinv10);
 +            felec            = _mm256_mul_pd(velec,rinvsq10);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq20,rinv20);
 +            felec            = _mm256_mul_pd(velec,rinvsq20);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq30             = _mm256_mul_pd(iq3,jq0);
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq30,rinv30);
 +            felec            = _mm256_mul_pd(velec,rinvsq30);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx30);
 +            ty               = _mm256_mul_pd(fscal,dy30);
 +            tz               = _mm256_mul_pd(fscal,dz30);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 116 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_4atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
 +        gmx_mm256_update_1pot_pd(vvdwsum,kernel_data->energygrp_vdw+ggid);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 26 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4_VF,outeriter*26 + inneriter*116);
 +}
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecCoul_VdwLJ_GeomW4P1_F_avx_256_double
 + * Electrostatics interaction: Coulomb
 + * VdW interaction:            LennardJones
 + * Geometry:                   Water4-Particle
 + * Calculate force/pot:        Force
 + */
 +void
 +nb_kernel_ElecCoul_VdwLJ_GeomW4P1_F_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    real *           vdwioffsetptr3;
 +    __m256d          ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          dx30,dy30,dz30,rsq30,rinv30,rinvsq30,r30,qq30,c6_30,c12_30;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    iq3              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+3]));
 +    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_4rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +        fix3             = _mm256_setzero_pd();
 +        fiy3             = _mm256_setzero_pd();
 +        fiz3             = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx30             = _mm256_sub_pd(ix3,jx0);
 +            dy30             = _mm256_sub_pd(iy3,jy0);
 +            dz30             = _mm256_sub_pd(iz3,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq30            = gmx_mm256_calc_rsq_pd(dx30,dy30,dz30);
 +
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv30           = gmx_mm256_invsqrt_pd(rsq30);
 +
 +            rinvsq00         = gmx_mm256_inv_pd(rsq00);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq30         = _mm256_mul_pd(rinv30,rinv30);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(_mm256_mul_pd(c12_00,rinvsix),c6_00),_mm256_mul_pd(rinvsix,rinvsq00));
 +
 +            fscal            = fvdw;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq10,rinv10);
 +            felec            = _mm256_mul_pd(velec,rinvsq10);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq20,rinv20);
 +            felec            = _mm256_mul_pd(velec,rinvsq20);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq30             = _mm256_mul_pd(iq3,jq0);
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq30,rinv30);
 +            felec            = _mm256_mul_pd(velec,rinvsq30);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx30);
 +            ty               = _mm256_mul_pd(fscal,dy30);
 +            tz               = _mm256_mul_pd(fscal,dz30);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 108 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx30             = _mm256_sub_pd(ix3,jx0);
 +            dy30             = _mm256_sub_pd(iy3,jy0);
 +            dz30             = _mm256_sub_pd(iz3,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq30            = gmx_mm256_calc_rsq_pd(dx30,dy30,dz30);
 +
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv30           = gmx_mm256_invsqrt_pd(rsq30);
 +
 +            rinvsq00         = gmx_mm256_inv_pd(rsq00);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq30         = _mm256_mul_pd(rinv30,rinv30);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(_mm256_mul_pd(c12_00,rinvsix),c6_00),_mm256_mul_pd(rinvsix,rinvsq00));
 +
 +            fscal            = fvdw;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq10,rinv10);
 +            felec            = _mm256_mul_pd(velec,rinvsq10);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq20,rinv20);
 +            felec            = _mm256_mul_pd(velec,rinvsq20);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq30             = _mm256_mul_pd(iq3,jq0);
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq30,rinv30);
 +            felec            = _mm256_mul_pd(velec,rinvsq30);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx30);
 +            ty               = _mm256_mul_pd(fscal,dy30);
 +            tz               = _mm256_mul_pd(fscal,dz30);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 108 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_4atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 24 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4_F,outeriter*24 + inneriter*108);
 +}
index 456e3ed4bff28f50afe94c48f8557653a48f74c9,0000000000000000000000000000000000000000..fed2a2f7e51942c5972887ca2a64b4cb1d13b4a9
mode 100644,000000..100644
--- /dev/null
@@@ -1,1944 -1,0 +1,1944 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/*
 + * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2001-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the Free
 + * Software Foundation; either version 2 of the License, or (at your option) any
 + * later version.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website.
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecCoul_VdwLJ_GeomW4W4_VF_avx_256_double
 + * Electrostatics interaction: Coulomb
 + * VdW interaction:            LennardJones
 + * Geometry:                   Water4-Water4
 + * Calculate force/pot:        PotentialAndForce
 + */
 +void
 +nb_kernel_ElecCoul_VdwLJ_GeomW4W4_VF_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    real *           vdwioffsetptr3;
 +    __m256d          ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
 +    __m256d          jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
 +    int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
 +    __m256d          jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
 +    int              vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D;
 +    __m256d          jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
 +    __m256d          dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
 +    __m256d          dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
 +    __m256d          dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
 +    __m256d          dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
 +    __m256d          dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
 +    __m256d          dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
 +    __m256d          dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
 +    __m256d          dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    iq3              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+3]));
 +    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +    jq1              = _mm256_set1_pd(charge[inr+1]);
 +    jq2              = _mm256_set1_pd(charge[inr+2]);
 +    jq3              = _mm256_set1_pd(charge[inr+3]);
 +    vdwjidx0A        = 2*vdwtype[inr+0];
 +    c6_00            = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A]);
 +    c12_00           = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A+1]);
 +    qq11             = _mm256_mul_pd(iq1,jq1);
 +    qq12             = _mm256_mul_pd(iq1,jq2);
 +    qq13             = _mm256_mul_pd(iq1,jq3);
 +    qq21             = _mm256_mul_pd(iq2,jq1);
 +    qq22             = _mm256_mul_pd(iq2,jq2);
 +    qq23             = _mm256_mul_pd(iq2,jq3);
 +    qq31             = _mm256_mul_pd(iq3,jq1);
 +    qq32             = _mm256_mul_pd(iq3,jq2);
 +    qq33             = _mm256_mul_pd(iq3,jq3);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_4rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +        fix3             = _mm256_setzero_pd();
 +        fiy3             = _mm256_setzero_pd();
 +        fiz3             = _mm256_setzero_pd();
 +
 +        /* Reset potential sums */
 +        velecsum         = _mm256_setzero_pd();
 +        vvdwsum          = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_4rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
 +                                                 &jy2,&jz2,&jx3,&jy3,&jz3);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx13             = _mm256_sub_pd(ix1,jx3);
 +            dy13             = _mm256_sub_pd(iy1,jy3);
 +            dz13             = _mm256_sub_pd(iz1,jz3);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +            dx23             = _mm256_sub_pd(ix2,jx3);
 +            dy23             = _mm256_sub_pd(iy2,jy3);
 +            dz23             = _mm256_sub_pd(iz2,jz3);
 +            dx31             = _mm256_sub_pd(ix3,jx1);
 +            dy31             = _mm256_sub_pd(iy3,jy1);
 +            dz31             = _mm256_sub_pd(iz3,jz1);
 +            dx32             = _mm256_sub_pd(ix3,jx2);
 +            dy32             = _mm256_sub_pd(iy3,jy2);
 +            dz32             = _mm256_sub_pd(iz3,jz2);
 +            dx33             = _mm256_sub_pd(ix3,jx3);
 +            dy33             = _mm256_sub_pd(iy3,jy3);
 +            dz33             = _mm256_sub_pd(iz3,jz3);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq13            = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +            rsq23            = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
 +            rsq31            = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
 +            rsq32            = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
 +            rsq33            = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);
 +
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv13           = gmx_mm256_invsqrt_pd(rsq13);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +            rinv23           = gmx_mm256_invsqrt_pd(rsq23);
 +            rinv31           = gmx_mm256_invsqrt_pd(rsq31);
 +            rinv32           = gmx_mm256_invsqrt_pd(rsq32);
 +            rinv33           = gmx_mm256_invsqrt_pd(rsq33);
 +
 +            rinvsq00         = gmx_mm256_inv_pd(rsq00);
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq13         = _mm256_mul_pd(rinv13,rinv13);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +            rinvsq23         = _mm256_mul_pd(rinv23,rinv23);
 +            rinvsq31         = _mm256_mul_pd(rinv31,rinv31);
 +            rinvsq32         = _mm256_mul_pd(rinv32,rinv32);
 +            rinvsq33         = _mm256_mul_pd(rinv33,rinv33);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +            fjx3             = _mm256_setzero_pd();
 +            fjy3             = _mm256_setzero_pd();
 +            fjz3             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = fvdw;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq11,rinv11);
 +            felec            = _mm256_mul_pd(velec,rinvsq11);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq12,rinv12);
 +            felec            = _mm256_mul_pd(velec,rinvsq12);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq13,rinv13);
 +            felec            = _mm256_mul_pd(velec,rinvsq13);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx13);
 +            ty               = _mm256_mul_pd(fscal,dy13);
 +            tz               = _mm256_mul_pd(fscal,dz13);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq21,rinv21);
 +            felec            = _mm256_mul_pd(velec,rinvsq21);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq22,rinv22);
 +            felec            = _mm256_mul_pd(velec,rinvsq22);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq23,rinv23);
 +            felec            = _mm256_mul_pd(velec,rinvsq23);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx23);
 +            ty               = _mm256_mul_pd(fscal,dy23);
 +            tz               = _mm256_mul_pd(fscal,dz23);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq31,rinv31);
 +            felec            = _mm256_mul_pd(velec,rinvsq31);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx31);
 +            ty               = _mm256_mul_pd(fscal,dy31);
 +            tz               = _mm256_mul_pd(fscal,dz31);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq32,rinv32);
 +            felec            = _mm256_mul_pd(velec,rinvsq32);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx32);
 +            ty               = _mm256_mul_pd(fscal,dy32);
 +            tz               = _mm256_mul_pd(fscal,dz32);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq33,rinv33);
 +            felec            = _mm256_mul_pd(velec,rinvsq33);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx33);
 +            ty               = _mm256_mul_pd(fscal,dy33);
 +            tz               = _mm256_mul_pd(fscal,dz33);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_4rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
 +                                                      fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
 +
 +            /* Inner loop uses 278 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_4rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
 +                                                 &jy2,&jz2,&jx3,&jy3,&jz3);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx13             = _mm256_sub_pd(ix1,jx3);
 +            dy13             = _mm256_sub_pd(iy1,jy3);
 +            dz13             = _mm256_sub_pd(iz1,jz3);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +            dx23             = _mm256_sub_pd(ix2,jx3);
 +            dy23             = _mm256_sub_pd(iy2,jy3);
 +            dz23             = _mm256_sub_pd(iz2,jz3);
 +            dx31             = _mm256_sub_pd(ix3,jx1);
 +            dy31             = _mm256_sub_pd(iy3,jy1);
 +            dz31             = _mm256_sub_pd(iz3,jz1);
 +            dx32             = _mm256_sub_pd(ix3,jx2);
 +            dy32             = _mm256_sub_pd(iy3,jy2);
 +            dz32             = _mm256_sub_pd(iz3,jz2);
 +            dx33             = _mm256_sub_pd(ix3,jx3);
 +            dy33             = _mm256_sub_pd(iy3,jy3);
 +            dz33             = _mm256_sub_pd(iz3,jz3);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq13            = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +            rsq23            = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
 +            rsq31            = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
 +            rsq32            = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
 +            rsq33            = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);
 +
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv13           = gmx_mm256_invsqrt_pd(rsq13);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +            rinv23           = gmx_mm256_invsqrt_pd(rsq23);
 +            rinv31           = gmx_mm256_invsqrt_pd(rsq31);
 +            rinv32           = gmx_mm256_invsqrt_pd(rsq32);
 +            rinv33           = gmx_mm256_invsqrt_pd(rsq33);
 +
 +            rinvsq00         = gmx_mm256_inv_pd(rsq00);
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq13         = _mm256_mul_pd(rinv13,rinv13);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +            rinvsq23         = _mm256_mul_pd(rinv23,rinv23);
 +            rinvsq31         = _mm256_mul_pd(rinv31,rinv31);
 +            rinvsq32         = _mm256_mul_pd(rinv32,rinv32);
 +            rinvsq33         = _mm256_mul_pd(rinv33,rinv33);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +            fjx3             = _mm256_setzero_pd();
 +            fjy3             = _mm256_setzero_pd();
 +            fjz3             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            vvdw             = _mm256_andnot_pd(dummy_mask,vvdw);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = fvdw;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq11,rinv11);
 +            felec            = _mm256_mul_pd(velec,rinvsq11);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq12,rinv12);
 +            felec            = _mm256_mul_pd(velec,rinvsq12);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq13,rinv13);
 +            felec            = _mm256_mul_pd(velec,rinvsq13);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx13);
 +            ty               = _mm256_mul_pd(fscal,dy13);
 +            tz               = _mm256_mul_pd(fscal,dz13);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq21,rinv21);
 +            felec            = _mm256_mul_pd(velec,rinvsq21);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq22,rinv22);
 +            felec            = _mm256_mul_pd(velec,rinvsq22);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq23,rinv23);
 +            felec            = _mm256_mul_pd(velec,rinvsq23);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx23);
 +            ty               = _mm256_mul_pd(fscal,dy23);
 +            tz               = _mm256_mul_pd(fscal,dz23);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq31,rinv31);
 +            felec            = _mm256_mul_pd(velec,rinvsq31);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx31);
 +            ty               = _mm256_mul_pd(fscal,dy31);
 +            tz               = _mm256_mul_pd(fscal,dz31);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq32,rinv32);
 +            felec            = _mm256_mul_pd(velec,rinvsq32);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx32);
 +            ty               = _mm256_mul_pd(fscal,dy32);
 +            tz               = _mm256_mul_pd(fscal,dz32);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq33,rinv33);
 +            felec            = _mm256_mul_pd(velec,rinvsq33);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx33);
 +            ty               = _mm256_mul_pd(fscal,dy33);
 +            tz               = _mm256_mul_pd(fscal,dz33);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_4rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
 +                                                      fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
 +
 +            /* Inner loop uses 278 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_4atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
 +        gmx_mm256_update_1pot_pd(vvdwsum,kernel_data->energygrp_vdw+ggid);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 26 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4W4_VF,outeriter*26 + inneriter*278);
 +}
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecCoul_VdwLJ_GeomW4W4_F_avx_256_double
 + * Electrostatics interaction: Coulomb
 + * VdW interaction:            LennardJones
 + * Geometry:                   Water4-Water4
 + * Calculate force/pot:        Force
 + */
 +void
 +nb_kernel_ElecCoul_VdwLJ_GeomW4W4_F_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    real *           vdwioffsetptr3;
 +    __m256d          ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
 +    __m256d          jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
 +    int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
 +    __m256d          jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
 +    int              vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D;
 +    __m256d          jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
 +    __m256d          dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
 +    __m256d          dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
 +    __m256d          dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
 +    __m256d          dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
 +    __m256d          dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
 +    __m256d          dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
 +    __m256d          dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
 +    __m256d          dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    iq3              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+3]));
 +    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +    jq1              = _mm256_set1_pd(charge[inr+1]);
 +    jq2              = _mm256_set1_pd(charge[inr+2]);
 +    jq3              = _mm256_set1_pd(charge[inr+3]);
 +    vdwjidx0A        = 2*vdwtype[inr+0];
 +    c6_00            = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A]);
 +    c12_00           = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A+1]);
 +    qq11             = _mm256_mul_pd(iq1,jq1);
 +    qq12             = _mm256_mul_pd(iq1,jq2);
 +    qq13             = _mm256_mul_pd(iq1,jq3);
 +    qq21             = _mm256_mul_pd(iq2,jq1);
 +    qq22             = _mm256_mul_pd(iq2,jq2);
 +    qq23             = _mm256_mul_pd(iq2,jq3);
 +    qq31             = _mm256_mul_pd(iq3,jq1);
 +    qq32             = _mm256_mul_pd(iq3,jq2);
 +    qq33             = _mm256_mul_pd(iq3,jq3);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_4rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +        fix3             = _mm256_setzero_pd();
 +        fiy3             = _mm256_setzero_pd();
 +        fiz3             = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_4rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
 +                                                 &jy2,&jz2,&jx3,&jy3,&jz3);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx13             = _mm256_sub_pd(ix1,jx3);
 +            dy13             = _mm256_sub_pd(iy1,jy3);
 +            dz13             = _mm256_sub_pd(iz1,jz3);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +            dx23             = _mm256_sub_pd(ix2,jx3);
 +            dy23             = _mm256_sub_pd(iy2,jy3);
 +            dz23             = _mm256_sub_pd(iz2,jz3);
 +            dx31             = _mm256_sub_pd(ix3,jx1);
 +            dy31             = _mm256_sub_pd(iy3,jy1);
 +            dz31             = _mm256_sub_pd(iz3,jz1);
 +            dx32             = _mm256_sub_pd(ix3,jx2);
 +            dy32             = _mm256_sub_pd(iy3,jy2);
 +            dz32             = _mm256_sub_pd(iz3,jz2);
 +            dx33             = _mm256_sub_pd(ix3,jx3);
 +            dy33             = _mm256_sub_pd(iy3,jy3);
 +            dz33             = _mm256_sub_pd(iz3,jz3);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq13            = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +            rsq23            = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
 +            rsq31            = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
 +            rsq32            = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
 +            rsq33            = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);
 +
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv13           = gmx_mm256_invsqrt_pd(rsq13);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +            rinv23           = gmx_mm256_invsqrt_pd(rsq23);
 +            rinv31           = gmx_mm256_invsqrt_pd(rsq31);
 +            rinv32           = gmx_mm256_invsqrt_pd(rsq32);
 +            rinv33           = gmx_mm256_invsqrt_pd(rsq33);
 +
 +            rinvsq00         = gmx_mm256_inv_pd(rsq00);
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq13         = _mm256_mul_pd(rinv13,rinv13);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +            rinvsq23         = _mm256_mul_pd(rinv23,rinv23);
 +            rinvsq31         = _mm256_mul_pd(rinv31,rinv31);
 +            rinvsq32         = _mm256_mul_pd(rinv32,rinv32);
 +            rinvsq33         = _mm256_mul_pd(rinv33,rinv33);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +            fjx3             = _mm256_setzero_pd();
 +            fjy3             = _mm256_setzero_pd();
 +            fjz3             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(_mm256_mul_pd(c12_00,rinvsix),c6_00),_mm256_mul_pd(rinvsix,rinvsq00));
 +
 +            fscal            = fvdw;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq11,rinv11);
 +            felec            = _mm256_mul_pd(velec,rinvsq11);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq12,rinv12);
 +            felec            = _mm256_mul_pd(velec,rinvsq12);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq13,rinv13);
 +            felec            = _mm256_mul_pd(velec,rinvsq13);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx13);
 +            ty               = _mm256_mul_pd(fscal,dy13);
 +            tz               = _mm256_mul_pd(fscal,dz13);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq21,rinv21);
 +            felec            = _mm256_mul_pd(velec,rinvsq21);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq22,rinv22);
 +            felec            = _mm256_mul_pd(velec,rinvsq22);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq23,rinv23);
 +            felec            = _mm256_mul_pd(velec,rinvsq23);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx23);
 +            ty               = _mm256_mul_pd(fscal,dy23);
 +            tz               = _mm256_mul_pd(fscal,dz23);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq31,rinv31);
 +            felec            = _mm256_mul_pd(velec,rinvsq31);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx31);
 +            ty               = _mm256_mul_pd(fscal,dy31);
 +            tz               = _mm256_mul_pd(fscal,dz31);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq32,rinv32);
 +            felec            = _mm256_mul_pd(velec,rinvsq32);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx32);
 +            ty               = _mm256_mul_pd(fscal,dy32);
 +            tz               = _mm256_mul_pd(fscal,dz32);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq33,rinv33);
 +            felec            = _mm256_mul_pd(velec,rinvsq33);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx33);
 +            ty               = _mm256_mul_pd(fscal,dy33);
 +            tz               = _mm256_mul_pd(fscal,dz33);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_4rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
 +                                                      fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
 +
 +            /* Inner loop uses 264 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_4rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
 +                                                 &jy2,&jz2,&jx3,&jy3,&jz3);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx13             = _mm256_sub_pd(ix1,jx3);
 +            dy13             = _mm256_sub_pd(iy1,jy3);
 +            dz13             = _mm256_sub_pd(iz1,jz3);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +            dx23             = _mm256_sub_pd(ix2,jx3);
 +            dy23             = _mm256_sub_pd(iy2,jy3);
 +            dz23             = _mm256_sub_pd(iz2,jz3);
 +            dx31             = _mm256_sub_pd(ix3,jx1);
 +            dy31             = _mm256_sub_pd(iy3,jy1);
 +            dz31             = _mm256_sub_pd(iz3,jz1);
 +            dx32             = _mm256_sub_pd(ix3,jx2);
 +            dy32             = _mm256_sub_pd(iy3,jy2);
 +            dz32             = _mm256_sub_pd(iz3,jz2);
 +            dx33             = _mm256_sub_pd(ix3,jx3);
 +            dy33             = _mm256_sub_pd(iy3,jy3);
 +            dz33             = _mm256_sub_pd(iz3,jz3);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq13            = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +            rsq23            = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
 +            rsq31            = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
 +            rsq32            = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
 +            rsq33            = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);
 +
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv13           = gmx_mm256_invsqrt_pd(rsq13);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +            rinv23           = gmx_mm256_invsqrt_pd(rsq23);
 +            rinv31           = gmx_mm256_invsqrt_pd(rsq31);
 +            rinv32           = gmx_mm256_invsqrt_pd(rsq32);
 +            rinv33           = gmx_mm256_invsqrt_pd(rsq33);
 +
 +            rinvsq00         = gmx_mm256_inv_pd(rsq00);
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq13         = _mm256_mul_pd(rinv13,rinv13);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +            rinvsq23         = _mm256_mul_pd(rinv23,rinv23);
 +            rinvsq31         = _mm256_mul_pd(rinv31,rinv31);
 +            rinvsq32         = _mm256_mul_pd(rinv32,rinv32);
 +            rinvsq33         = _mm256_mul_pd(rinv33,rinv33);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +            fjx3             = _mm256_setzero_pd();
 +            fjy3             = _mm256_setzero_pd();
 +            fjz3             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(_mm256_mul_pd(c12_00,rinvsix),c6_00),_mm256_mul_pd(rinvsix,rinvsq00));
 +
 +            fscal            = fvdw;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq11,rinv11);
 +            felec            = _mm256_mul_pd(velec,rinvsq11);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq12,rinv12);
 +            felec            = _mm256_mul_pd(velec,rinvsq12);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq13,rinv13);
 +            felec            = _mm256_mul_pd(velec,rinvsq13);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx13);
 +            ty               = _mm256_mul_pd(fscal,dy13);
 +            tz               = _mm256_mul_pd(fscal,dz13);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq21,rinv21);
 +            felec            = _mm256_mul_pd(velec,rinvsq21);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq22,rinv22);
 +            felec            = _mm256_mul_pd(velec,rinvsq22);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq23,rinv23);
 +            felec            = _mm256_mul_pd(velec,rinvsq23);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx23);
 +            ty               = _mm256_mul_pd(fscal,dy23);
 +            tz               = _mm256_mul_pd(fscal,dz23);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq31,rinv31);
 +            felec            = _mm256_mul_pd(velec,rinvsq31);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx31);
 +            ty               = _mm256_mul_pd(fscal,dy31);
 +            tz               = _mm256_mul_pd(fscal,dz31);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq32,rinv32);
 +            felec            = _mm256_mul_pd(velec,rinvsq32);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx32);
 +            ty               = _mm256_mul_pd(fscal,dy32);
 +            tz               = _mm256_mul_pd(fscal,dz32);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq33,rinv33);
 +            felec            = _mm256_mul_pd(velec,rinvsq33);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx33);
 +            ty               = _mm256_mul_pd(fscal,dy33);
 +            tz               = _mm256_mul_pd(fscal,dz33);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_4rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
 +                                                      fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
 +
 +            /* Inner loop uses 264 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_4atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 24 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4W4_F,outeriter*24 + inneriter*264);
 +}
index 567e96c2896b299900d4c5284a4f869d855a8b60,0000000000000000000000000000000000000000..f51a1c6b8bfe6383be5938460d1eb97471f84867
mode 100644,000000..100644
--- /dev/null
@@@ -1,581 -1,0 +1,581 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/*
 + * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2001-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the Free
 + * Software Foundation; either version 2 of the License, or (at your option) any
 + * later version.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website.
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecCoul_VdwNone_GeomP1P1_VF_avx_256_double
 + * Electrostatics interaction: Coulomb
 + * VdW interaction:            None
 + * Geometry:                   Particle-Particle
 + * Calculate force/pot:        PotentialAndForce
 + */
 +void
 +nb_kernel_ElecCoul_VdwNone_GeomP1P1_VF_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    /* NOTE(review): signbit, one and two appear unused in this particular kernel;
 +     * presumably emitted unconditionally by the kernel generator -- confirm there. */
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    /* Clear scratch; the masked epilogue below redirects force stores for
 +     * padding (non-real) j entries into this dummy buffer. */
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_1rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +
 +        /* Load parameters for i particles */
 +        iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +
 +        /* Reset potential sums */
 +        velecsum         = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        /* Full-width quadruplets only: non-real (padding) entries in jjnr are
 +         * negative, so a negative jjnr[jidx+3] means this quadruplet is partial
 +         * and is left to the masked epilogue after this loop. */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq00,rinv00);
 +            felec            = _mm256_mul_pd(velec,rinvsq00);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
 +
 +            /* Inner loop uses 27 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +            /* The ps (not pd) cast matches tmpmask0's __m128 type; each 32-bit
 +             * mask element is duplicated below and widened into a 256-bit
 +             * double-precision mask. */
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq00,rinv00);
 +            felec            = _mm256_mul_pd(velec,rinvsq00);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
 +
 +            /* Inner loop uses 27 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_1atom_swizzle_pd(fix0,fiy0,fiz0,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 8 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VF,outeriter*8 + inneriter*27);
 +}
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecCoul_VdwNone_GeomP1P1_F_avx_256_double
 + * Electrostatics interaction: Coulomb
 + * VdW interaction:            None
 + * Geometry:                   Particle-Particle
 + * Calculate force/pot:        Force
 + */
 +void
 +nb_kernel_ElecCoul_VdwNone_GeomP1P1_F_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_1rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +
 +        /* Load parameters for i particles */
 +        iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq00,rinv00);
 +            felec            = _mm256_mul_pd(velec,rinvsq00);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
 +
 +            /* Inner loop uses 26 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq00,rinv00);
 +            felec            = _mm256_mul_pd(velec,rinvsq00);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
 +
 +            /* Inner loop uses 26 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_1atom_swizzle_pd(fix0,fiy0,fiz0,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 7 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_F,outeriter*7 + inneriter*26);
 +}
index 61e29dc4a22e536e5afeb3c78068bf1ca94005fa,0000000000000000000000000000000000000000..a8b9b2e174d21f3d31f012c238f4fb448c7a99e6
mode 100644,000000..100644
--- /dev/null
@@@ -1,935 -1,0 +1,935 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/*
 + * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2001-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the Free
 + * Software Foundation; either version 2 of the License, or (at your option) any
 + * later version.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website.
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecCoul_VdwNone_GeomW3P1_VF_avx_256_double
 + * Electrostatics interaction: Coulomb
 + * VdW interaction:            None
 + * Geometry:                   Water3-Particle
 + * Calculate force/pot:        PotentialAndForce
 + */
 +void
 +nb_kernel_ElecCoul_VdwNone_GeomW3P1_VF_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +
 +        /* Reset potential sums */
 +        velecsum         = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq00,rinv00);
 +            felec            = _mm256_mul_pd(velec,rinvsq00);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq10,rinv10);
 +            felec            = _mm256_mul_pd(velec,rinvsq10);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq20,rinv20);
 +            felec            = _mm256_mul_pd(velec,rinvsq20);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 84 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq00,rinv00);
 +            felec            = _mm256_mul_pd(velec,rinvsq00);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq10,rinv10);
 +            felec            = _mm256_mul_pd(velec,rinvsq10);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq20,rinv20);
 +            felec            = _mm256_mul_pd(velec,rinvsq20);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 84 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 19 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W3_VF,outeriter*19 + inneriter*84);
 +}
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecCoul_VdwNone_GeomW3P1_F_avx_256_double
 + * Electrostatics interaction: Coulomb
 + * VdW interaction:            None
 + * Geometry:                   Water3-Particle
 + * Calculate force/pot:        Force
 + */
 +void
 +nb_kernel_ElecCoul_VdwNone_GeomW3P1_F_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq00,rinv00);
 +            felec            = _mm256_mul_pd(velec,rinvsq00);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq10,rinv10);
 +            felec            = _mm256_mul_pd(velec,rinvsq10);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq20,rinv20);
 +            felec            = _mm256_mul_pd(velec,rinvsq20);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 81 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq00,rinv00);
 +            felec            = _mm256_mul_pd(velec,rinvsq00);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq10,rinv10);
 +            felec            = _mm256_mul_pd(velec,rinvsq10);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq20,rinv20);
 +            felec            = _mm256_mul_pd(velec,rinvsq20);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 81 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 18 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W3_F,outeriter*18 + inneriter*81);
 +}
index d7eb1753ef5ed056bb6c72143a13670061db2f33,0000000000000000000000000000000000000000..2d422b4a1e29f2b28030981e32a181cb546dd67c
mode 100644,000000..100644
--- /dev/null
@@@ -1,1743 -1,0 +1,1743 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/*
 + * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2001-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the Free
 + * Software Foundation; either version 2 of the License, or (at your option) any
 + * later version.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website.
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecCoul_VdwNone_GeomW3W3_VF_avx_256_double
 + * Electrostatics interaction: Coulomb
 + * VdW interaction:            None
 + * Geometry:                   Water3-Water3
 + * Calculate force/pot:        PotentialAndForce
 + */
 +void
 +nb_kernel_ElecCoul_VdwNone_GeomW3W3_VF_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
 +    __m256d          jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
 +    int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
 +    __m256d          jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
 +    __m256d          dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
 +    __m256d          dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
 +    __m256d          dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +
 +    jq0              = _mm256_set1_pd(charge[inr+0]);
 +    jq1              = _mm256_set1_pd(charge[inr+1]);
 +    jq2              = _mm256_set1_pd(charge[inr+2]);
 +    qq00             = _mm256_mul_pd(iq0,jq0);
 +    qq01             = _mm256_mul_pd(iq0,jq1);
 +    qq02             = _mm256_mul_pd(iq0,jq2);
 +    qq10             = _mm256_mul_pd(iq1,jq0);
 +    qq11             = _mm256_mul_pd(iq1,jq1);
 +    qq12             = _mm256_mul_pd(iq1,jq2);
 +    qq20             = _mm256_mul_pd(iq2,jq0);
 +    qq21             = _mm256_mul_pd(iq2,jq1);
 +    qq22             = _mm256_mul_pd(iq2,jq2);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +
 +        /* Reset potential sums */
 +        velecsum         = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                              &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx01             = _mm256_sub_pd(ix0,jx1);
 +            dy01             = _mm256_sub_pd(iy0,jy1);
 +            dz01             = _mm256_sub_pd(iz0,jz1);
 +            dx02             = _mm256_sub_pd(ix0,jx2);
 +            dy02             = _mm256_sub_pd(iy0,jy2);
 +            dz02             = _mm256_sub_pd(iz0,jz2);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
 +            rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv01           = gmx_mm256_invsqrt_pd(rsq01);
 +            rinv02           = gmx_mm256_invsqrt_pd(rsq02);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq01         = _mm256_mul_pd(rinv01,rinv01);
 +            rinvsq02         = _mm256_mul_pd(rinv02,rinv02);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq00,rinv00);
 +            felec            = _mm256_mul_pd(velec,rinvsq00);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq01,rinv01);
 +            felec            = _mm256_mul_pd(velec,rinvsq01);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx01);
 +            ty               = _mm256_mul_pd(fscal,dy01);
 +            tz               = _mm256_mul_pd(fscal,dz01);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq02,rinv02);
 +            felec            = _mm256_mul_pd(velec,rinvsq02);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx02);
 +            ty               = _mm256_mul_pd(fscal,dy02);
 +            tz               = _mm256_mul_pd(fscal,dz02);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq10,rinv10);
 +            felec            = _mm256_mul_pd(velec,rinvsq10);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq11,rinv11);
 +            felec            = _mm256_mul_pd(velec,rinvsq11);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq12,rinv12);
 +            felec            = _mm256_mul_pd(velec,rinvsq12);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq20,rinv20);
 +            felec            = _mm256_mul_pd(velec,rinvsq20);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq21,rinv21);
 +            felec            = _mm256_mul_pd(velec,rinvsq21);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq22,rinv22);
 +            felec            = _mm256_mul_pd(velec,rinvsq22);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
 +
 +            /* Inner loop uses 243 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                              &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx01             = _mm256_sub_pd(ix0,jx1);
 +            dy01             = _mm256_sub_pd(iy0,jy1);
 +            dz01             = _mm256_sub_pd(iz0,jz1);
 +            dx02             = _mm256_sub_pd(ix0,jx2);
 +            dy02             = _mm256_sub_pd(iy0,jy2);
 +            dz02             = _mm256_sub_pd(iz0,jz2);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
 +            rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv01           = gmx_mm256_invsqrt_pd(rsq01);
 +            rinv02           = gmx_mm256_invsqrt_pd(rsq02);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq01         = _mm256_mul_pd(rinv01,rinv01);
 +            rinvsq02         = _mm256_mul_pd(rinv02,rinv02);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq00,rinv00);
 +            felec            = _mm256_mul_pd(velec,rinvsq00);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq01,rinv01);
 +            felec            = _mm256_mul_pd(velec,rinvsq01);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx01);
 +            ty               = _mm256_mul_pd(fscal,dy01);
 +            tz               = _mm256_mul_pd(fscal,dz01);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq02,rinv02);
 +            felec            = _mm256_mul_pd(velec,rinvsq02);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx02);
 +            ty               = _mm256_mul_pd(fscal,dy02);
 +            tz               = _mm256_mul_pd(fscal,dz02);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq10,rinv10);
 +            felec            = _mm256_mul_pd(velec,rinvsq10);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq11,rinv11);
 +            felec            = _mm256_mul_pd(velec,rinvsq11);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq12,rinv12);
 +            felec            = _mm256_mul_pd(velec,rinvsq12);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq20,rinv20);
 +            felec            = _mm256_mul_pd(velec,rinvsq20);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq21,rinv21);
 +            felec            = _mm256_mul_pd(velec,rinvsq21);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq22,rinv22);
 +            felec            = _mm256_mul_pd(velec,rinvsq22);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
 +
 +            /* Inner loop uses 243 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 19 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W3W3_VF,outeriter*19 + inneriter*243);
 +}
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecCoul_VdwNone_GeomW3W3_F_avx_256_double
 + * Electrostatics interaction: Coulomb
 + * VdW interaction:            None
 + * Geometry:                   Water3-Water3
 + * Calculate force/pot:        Force
 + */
 +void
 +nb_kernel_ElecCoul_VdwNone_GeomW3W3_F_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
 +    __m256d          jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
 +    int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
 +    __m256d          jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
 +    __m256d          dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
 +    __m256d          dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
 +    __m256d          dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +
 +    jq0              = _mm256_set1_pd(charge[inr+0]);
 +    jq1              = _mm256_set1_pd(charge[inr+1]);
 +    jq2              = _mm256_set1_pd(charge[inr+2]);
 +    qq00             = _mm256_mul_pd(iq0,jq0);
 +    qq01             = _mm256_mul_pd(iq0,jq1);
 +    qq02             = _mm256_mul_pd(iq0,jq2);
 +    qq10             = _mm256_mul_pd(iq1,jq0);
 +    qq11             = _mm256_mul_pd(iq1,jq1);
 +    qq12             = _mm256_mul_pd(iq1,jq2);
 +    qq20             = _mm256_mul_pd(iq2,jq0);
 +    qq21             = _mm256_mul_pd(iq2,jq1);
 +    qq22             = _mm256_mul_pd(iq2,jq2);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                              &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx01             = _mm256_sub_pd(ix0,jx1);
 +            dy01             = _mm256_sub_pd(iy0,jy1);
 +            dz01             = _mm256_sub_pd(iz0,jz1);
 +            dx02             = _mm256_sub_pd(ix0,jx2);
 +            dy02             = _mm256_sub_pd(iy0,jy2);
 +            dz02             = _mm256_sub_pd(iz0,jz2);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
 +            rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv01           = gmx_mm256_invsqrt_pd(rsq01);
 +            rinv02           = gmx_mm256_invsqrt_pd(rsq02);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq01         = _mm256_mul_pd(rinv01,rinv01);
 +            rinvsq02         = _mm256_mul_pd(rinv02,rinv02);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq00,rinv00);
 +            felec            = _mm256_mul_pd(velec,rinvsq00);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq01,rinv01);
 +            felec            = _mm256_mul_pd(velec,rinvsq01);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx01);
 +            ty               = _mm256_mul_pd(fscal,dy01);
 +            tz               = _mm256_mul_pd(fscal,dz01);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq02,rinv02);
 +            felec            = _mm256_mul_pd(velec,rinvsq02);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx02);
 +            ty               = _mm256_mul_pd(fscal,dy02);
 +            tz               = _mm256_mul_pd(fscal,dz02);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq10,rinv10);
 +            felec            = _mm256_mul_pd(velec,rinvsq10);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq11,rinv11);
 +            felec            = _mm256_mul_pd(velec,rinvsq11);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq12,rinv12);
 +            felec            = _mm256_mul_pd(velec,rinvsq12);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq20,rinv20);
 +            felec            = _mm256_mul_pd(velec,rinvsq20);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq21,rinv21);
 +            felec            = _mm256_mul_pd(velec,rinvsq21);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq22,rinv22);
 +            felec            = _mm256_mul_pd(velec,rinvsq22);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
 +
 +            /* Inner loop uses 234 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                              &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx01             = _mm256_sub_pd(ix0,jx1);
 +            dy01             = _mm256_sub_pd(iy0,jy1);
 +            dz01             = _mm256_sub_pd(iz0,jz1);
 +            dx02             = _mm256_sub_pd(ix0,jx2);
 +            dy02             = _mm256_sub_pd(iy0,jy2);
 +            dz02             = _mm256_sub_pd(iz0,jz2);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
 +            rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv01           = gmx_mm256_invsqrt_pd(rsq01);
 +            rinv02           = gmx_mm256_invsqrt_pd(rsq02);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq01         = _mm256_mul_pd(rinv01,rinv01);
 +            rinvsq02         = _mm256_mul_pd(rinv02,rinv02);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq00,rinv00);
 +            felec            = _mm256_mul_pd(velec,rinvsq00);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq01,rinv01);
 +            felec            = _mm256_mul_pd(velec,rinvsq01);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx01);
 +            ty               = _mm256_mul_pd(fscal,dy01);
 +            tz               = _mm256_mul_pd(fscal,dz01);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq02,rinv02);
 +            felec            = _mm256_mul_pd(velec,rinvsq02);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx02);
 +            ty               = _mm256_mul_pd(fscal,dy02);
 +            tz               = _mm256_mul_pd(fscal,dz02);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq10,rinv10);
 +            felec            = _mm256_mul_pd(velec,rinvsq10);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq11,rinv11);
 +            felec            = _mm256_mul_pd(velec,rinvsq11);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq12,rinv12);
 +            felec            = _mm256_mul_pd(velec,rinvsq12);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq20,rinv20);
 +            felec            = _mm256_mul_pd(velec,rinvsq20);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq21,rinv21);
 +            felec            = _mm256_mul_pd(velec,rinvsq21);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq22,rinv22);
 +            felec            = _mm256_mul_pd(velec,rinvsq22);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
 +
 +            /* Inner loop uses 234 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 18 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W3W3_F,outeriter*18 + inneriter*234);
 +}
index 90f00e1743c5cd013f68786ae64460392878ab70,0000000000000000000000000000000000000000..6c1d769084a6af72121dcf6cba5db895b0d3876a
mode 100644,000000..100644
--- /dev/null
@@@ -1,935 -1,0 +1,935 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/*
 + * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2001-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the Free
 + * Software Foundation; either version 2 of the License, or (at your option) any
 + * later version.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website.
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecCoul_VdwNone_GeomW4P1_VF_avx_256_double
 + * Electrostatics interaction: Coulomb
 + * VdW interaction:            None
 + * Geometry:                   Water4-Particle
 + * Calculate force/pot:        PotentialAndForce
 + */
 +void
 +nb_kernel_ElecCoul_VdwNone_GeomW4P1_VF_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    real *           vdwioffsetptr3;
 +    __m256d          ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          dx30,dy30,dz30,rsq30,rinv30,rinvsq30,r30,qq30,c6_30,c12_30;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    iq3              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+3]));
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset+DIM,
 +                                                    &ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
 +
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +        fix3             = _mm256_setzero_pd();
 +        fiy3             = _mm256_setzero_pd();
 +        fiz3             = _mm256_setzero_pd();
 +
 +        /* Reset potential sums */
 +        velecsum         = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx30             = _mm256_sub_pd(ix3,jx0);
 +            dy30             = _mm256_sub_pd(iy3,jy0);
 +            dz30             = _mm256_sub_pd(iz3,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq30            = gmx_mm256_calc_rsq_pd(dx30,dy30,dz30);
 +
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv30           = gmx_mm256_invsqrt_pd(rsq30);
 +
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq30         = _mm256_mul_pd(rinv30,rinv30);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq10,rinv10);
 +            felec            = _mm256_mul_pd(velec,rinvsq10);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq20,rinv20);
 +            felec            = _mm256_mul_pd(velec,rinvsq20);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq30             = _mm256_mul_pd(iq3,jq0);
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq30,rinv30);
 +            felec            = _mm256_mul_pd(velec,rinvsq30);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx30);
 +            ty               = _mm256_mul_pd(fscal,dy30);
 +            tz               = _mm256_mul_pd(fscal,dz30);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 84 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx30             = _mm256_sub_pd(ix3,jx0);
 +            dy30             = _mm256_sub_pd(iy3,jy0);
 +            dz30             = _mm256_sub_pd(iz3,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq30            = gmx_mm256_calc_rsq_pd(dx30,dy30,dz30);
 +
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv30           = gmx_mm256_invsqrt_pd(rsq30);
 +
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq30         = _mm256_mul_pd(rinv30,rinv30);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq10,rinv10);
 +            felec            = _mm256_mul_pd(velec,rinvsq10);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq20,rinv20);
 +            felec            = _mm256_mul_pd(velec,rinvsq20);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq30             = _mm256_mul_pd(iq3,jq0);
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq30,rinv30);
 +            felec            = _mm256_mul_pd(velec,rinvsq30);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx30);
 +            ty               = _mm256_mul_pd(fscal,dy30);
 +            tz               = _mm256_mul_pd(fscal,dz30);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 84 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
 +                                                 f+i_coord_offset+DIM,fshift+i_shift_offset);
 +
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 19 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W4_VF,outeriter*19 + inneriter*84);
 +}
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecCoul_VdwNone_GeomW4P1_F_avx_256_double
 + * Electrostatics interaction: Coulomb
 + * VdW interaction:            None
 + * Geometry:                   Water4-Particle
 + * Calculate force/pot:        Force
 + */
 +void
 +nb_kernel_ElecCoul_VdwNone_GeomW4P1_F_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    real *           vdwioffsetptr3;
 +    __m256d          ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          dx30,dy30,dz30,rsq30,rinv30,rinvsq30,r30,qq30,c6_30,c12_30;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    iq3              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+3]));
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset+DIM,
 +                                                    &ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
 +
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +        fix3             = _mm256_setzero_pd();
 +        fiy3             = _mm256_setzero_pd();
 +        fiz3             = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx30             = _mm256_sub_pd(ix3,jx0);
 +            dy30             = _mm256_sub_pd(iy3,jy0);
 +            dz30             = _mm256_sub_pd(iz3,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq30            = gmx_mm256_calc_rsq_pd(dx30,dy30,dz30);
 +
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv30           = gmx_mm256_invsqrt_pd(rsq30);
 +
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq30         = _mm256_mul_pd(rinv30,rinv30);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq10,rinv10);
 +            felec            = _mm256_mul_pd(velec,rinvsq10);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq20,rinv20);
 +            felec            = _mm256_mul_pd(velec,rinvsq20);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq30             = _mm256_mul_pd(iq3,jq0);
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq30,rinv30);
 +            felec            = _mm256_mul_pd(velec,rinvsq30);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx30);
 +            ty               = _mm256_mul_pd(fscal,dy30);
 +            tz               = _mm256_mul_pd(fscal,dz30);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 81 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx30             = _mm256_sub_pd(ix3,jx0);
 +            dy30             = _mm256_sub_pd(iy3,jy0);
 +            dz30             = _mm256_sub_pd(iz3,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq30            = gmx_mm256_calc_rsq_pd(dx30,dy30,dz30);
 +
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv30           = gmx_mm256_invsqrt_pd(rsq30);
 +
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq30         = _mm256_mul_pd(rinv30,rinv30);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq10,rinv10);
 +            felec            = _mm256_mul_pd(velec,rinvsq10);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq20,rinv20);
 +            felec            = _mm256_mul_pd(velec,rinvsq20);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq30             = _mm256_mul_pd(iq3,jq0);
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq30,rinv30);
 +            felec            = _mm256_mul_pd(velec,rinvsq30);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx30);
 +            ty               = _mm256_mul_pd(fscal,dy30);
 +            tz               = _mm256_mul_pd(fscal,dz30);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 81 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
 +                                                 f+i_coord_offset+DIM,fshift+i_shift_offset);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 18 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W4_F,outeriter*18 + inneriter*81);
 +}
index 4e4bf4fcf6afbbd0e8dd31c5c46c55aebc2d28e6,0000000000000000000000000000000000000000..b77d518579ac85d8302958df2af0fa76d23c902d
mode 100644,000000..100644
--- /dev/null
@@@ -1,1743 -1,0 +1,1743 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/*
 + * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2001-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the Free
 + * Software Foundation; either version 2 of the License, or (at your option) any
 + * later version.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website.
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecCoul_VdwNone_GeomW4W4_VF_avx_256_double
 + * Electrostatics interaction: Coulomb
 + * VdW interaction:            None
 + * Geometry:                   Water4-Water4
 + * Calculate force/pot:        PotentialAndForce
 + */
 +void
 +nb_kernel_ElecCoul_VdwNone_GeomW4W4_VF_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    real *           vdwioffsetptr3;
 +    __m256d          ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
 +    int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
 +    __m256d          jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
 +    int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
 +    __m256d          jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
 +    int              vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D;
 +    __m256d          jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
 +    __m256d          dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
 +    __m256d          dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
 +    __m256d          dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
 +    __m256d          dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
 +    __m256d          dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
 +    __m256d          dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
 +    __m256d          dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
 +    __m256d          dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
 +    __m256d          dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    iq3              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+3]));
 +
 +    jq1              = _mm256_set1_pd(charge[inr+1]);
 +    jq2              = _mm256_set1_pd(charge[inr+2]);
 +    jq3              = _mm256_set1_pd(charge[inr+3]);
 +    qq11             = _mm256_mul_pd(iq1,jq1);
 +    qq12             = _mm256_mul_pd(iq1,jq2);
 +    qq13             = _mm256_mul_pd(iq1,jq3);
 +    qq21             = _mm256_mul_pd(iq2,jq1);
 +    qq22             = _mm256_mul_pd(iq2,jq2);
 +    qq23             = _mm256_mul_pd(iq2,jq3);
 +    qq31             = _mm256_mul_pd(iq3,jq1);
 +    qq32             = _mm256_mul_pd(iq3,jq2);
 +    qq33             = _mm256_mul_pd(iq3,jq3);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset+DIM,
 +                                                    &ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
 +
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +        fix3             = _mm256_setzero_pd();
 +        fiy3             = _mm256_setzero_pd();
 +        fiz3             = _mm256_setzero_pd();
 +
 +        /* Reset potential sums */
 +        velecsum         = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA+DIM,x+j_coord_offsetB+DIM,
 +                                                 x+j_coord_offsetC+DIM,x+j_coord_offsetD+DIM,
 +                                                 &jx1,&jy1,&jz1,&jx2,&jy2,&jz2,&jx3,&jy3,&jz3);
 +
 +            /* Calculate displacement vector */
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx13             = _mm256_sub_pd(ix1,jx3);
 +            dy13             = _mm256_sub_pd(iy1,jy3);
 +            dz13             = _mm256_sub_pd(iz1,jz3);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +            dx23             = _mm256_sub_pd(ix2,jx3);
 +            dy23             = _mm256_sub_pd(iy2,jy3);
 +            dz23             = _mm256_sub_pd(iz2,jz3);
 +            dx31             = _mm256_sub_pd(ix3,jx1);
 +            dy31             = _mm256_sub_pd(iy3,jy1);
 +            dz31             = _mm256_sub_pd(iz3,jz1);
 +            dx32             = _mm256_sub_pd(ix3,jx2);
 +            dy32             = _mm256_sub_pd(iy3,jy2);
 +            dz32             = _mm256_sub_pd(iz3,jz2);
 +            dx33             = _mm256_sub_pd(ix3,jx3);
 +            dy33             = _mm256_sub_pd(iy3,jy3);
 +            dz33             = _mm256_sub_pd(iz3,jz3);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq13            = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +            rsq23            = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
 +            rsq31            = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
 +            rsq32            = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
 +            rsq33            = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);
 +
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv13           = gmx_mm256_invsqrt_pd(rsq13);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +            rinv23           = gmx_mm256_invsqrt_pd(rsq23);
 +            rinv31           = gmx_mm256_invsqrt_pd(rsq31);
 +            rinv32           = gmx_mm256_invsqrt_pd(rsq32);
 +            rinv33           = gmx_mm256_invsqrt_pd(rsq33);
 +
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq13         = _mm256_mul_pd(rinv13,rinv13);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +            rinvsq23         = _mm256_mul_pd(rinv23,rinv23);
 +            rinvsq31         = _mm256_mul_pd(rinv31,rinv31);
 +            rinvsq32         = _mm256_mul_pd(rinv32,rinv32);
 +            rinvsq33         = _mm256_mul_pd(rinv33,rinv33);
 +
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +            fjx3             = _mm256_setzero_pd();
 +            fjy3             = _mm256_setzero_pd();
 +            fjz3             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq11,rinv11);
 +            felec            = _mm256_mul_pd(velec,rinvsq11);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq12,rinv12);
 +            felec            = _mm256_mul_pd(velec,rinvsq12);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq13,rinv13);
 +            felec            = _mm256_mul_pd(velec,rinvsq13);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx13);
 +            ty               = _mm256_mul_pd(fscal,dy13);
 +            tz               = _mm256_mul_pd(fscal,dz13);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq21,rinv21);
 +            felec            = _mm256_mul_pd(velec,rinvsq21);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq22,rinv22);
 +            felec            = _mm256_mul_pd(velec,rinvsq22);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq23,rinv23);
 +            felec            = _mm256_mul_pd(velec,rinvsq23);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx23);
 +            ty               = _mm256_mul_pd(fscal,dy23);
 +            tz               = _mm256_mul_pd(fscal,dz23);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq31,rinv31);
 +            felec            = _mm256_mul_pd(velec,rinvsq31);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx31);
 +            ty               = _mm256_mul_pd(fscal,dy31);
 +            tz               = _mm256_mul_pd(fscal,dz31);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq32,rinv32);
 +            felec            = _mm256_mul_pd(velec,rinvsq32);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx32);
 +            ty               = _mm256_mul_pd(fscal,dy32);
 +            tz               = _mm256_mul_pd(fscal,dz32);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq33,rinv33);
 +            felec            = _mm256_mul_pd(velec,rinvsq33);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx33);
 +            ty               = _mm256_mul_pd(fscal,dy33);
 +            tz               = _mm256_mul_pd(fscal,dz33);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA+DIM,fjptrB+DIM,fjptrC+DIM,fjptrD+DIM,
 +                                                      fjx1,fjy1,fjz1,fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
 +
 +            /* Inner loop uses 243 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA+DIM,x+j_coord_offsetB+DIM,
 +                                                 x+j_coord_offsetC+DIM,x+j_coord_offsetD+DIM,
 +                                                 &jx1,&jy1,&jz1,&jx2,&jy2,&jz2,&jx3,&jy3,&jz3);
 +
 +            /* Calculate displacement vector */
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx13             = _mm256_sub_pd(ix1,jx3);
 +            dy13             = _mm256_sub_pd(iy1,jy3);
 +            dz13             = _mm256_sub_pd(iz1,jz3);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +            dx23             = _mm256_sub_pd(ix2,jx3);
 +            dy23             = _mm256_sub_pd(iy2,jy3);
 +            dz23             = _mm256_sub_pd(iz2,jz3);
 +            dx31             = _mm256_sub_pd(ix3,jx1);
 +            dy31             = _mm256_sub_pd(iy3,jy1);
 +            dz31             = _mm256_sub_pd(iz3,jz1);
 +            dx32             = _mm256_sub_pd(ix3,jx2);
 +            dy32             = _mm256_sub_pd(iy3,jy2);
 +            dz32             = _mm256_sub_pd(iz3,jz2);
 +            dx33             = _mm256_sub_pd(ix3,jx3);
 +            dy33             = _mm256_sub_pd(iy3,jy3);
 +            dz33             = _mm256_sub_pd(iz3,jz3);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq13            = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +            rsq23            = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
 +            rsq31            = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
 +            rsq32            = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
 +            rsq33            = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);
 +
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv13           = gmx_mm256_invsqrt_pd(rsq13);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +            rinv23           = gmx_mm256_invsqrt_pd(rsq23);
 +            rinv31           = gmx_mm256_invsqrt_pd(rsq31);
 +            rinv32           = gmx_mm256_invsqrt_pd(rsq32);
 +            rinv33           = gmx_mm256_invsqrt_pd(rsq33);
 +
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq13         = _mm256_mul_pd(rinv13,rinv13);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +            rinvsq23         = _mm256_mul_pd(rinv23,rinv23);
 +            rinvsq31         = _mm256_mul_pd(rinv31,rinv31);
 +            rinvsq32         = _mm256_mul_pd(rinv32,rinv32);
 +            rinvsq33         = _mm256_mul_pd(rinv33,rinv33);
 +
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +            fjx3             = _mm256_setzero_pd();
 +            fjy3             = _mm256_setzero_pd();
 +            fjz3             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq11,rinv11);
 +            felec            = _mm256_mul_pd(velec,rinvsq11);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq12,rinv12);
 +            felec            = _mm256_mul_pd(velec,rinvsq12);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq13,rinv13);
 +            felec            = _mm256_mul_pd(velec,rinvsq13);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx13);
 +            ty               = _mm256_mul_pd(fscal,dy13);
 +            tz               = _mm256_mul_pd(fscal,dz13);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq21,rinv21);
 +            felec            = _mm256_mul_pd(velec,rinvsq21);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq22,rinv22);
 +            felec            = _mm256_mul_pd(velec,rinvsq22);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq23,rinv23);
 +            felec            = _mm256_mul_pd(velec,rinvsq23);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx23);
 +            ty               = _mm256_mul_pd(fscal,dy23);
 +            tz               = _mm256_mul_pd(fscal,dz23);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq31,rinv31);
 +            felec            = _mm256_mul_pd(velec,rinvsq31);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx31);
 +            ty               = _mm256_mul_pd(fscal,dy31);
 +            tz               = _mm256_mul_pd(fscal,dz31);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq32,rinv32);
 +            felec            = _mm256_mul_pd(velec,rinvsq32);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx32);
 +            ty               = _mm256_mul_pd(fscal,dy32);
 +            tz               = _mm256_mul_pd(fscal,dz32);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq33,rinv33);
 +            felec            = _mm256_mul_pd(velec,rinvsq33);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx33);
 +            ty               = _mm256_mul_pd(fscal,dy33);
 +            tz               = _mm256_mul_pd(fscal,dz33);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA+DIM,fjptrB+DIM,fjptrC+DIM,fjptrD+DIM,
 +                                                      fjx1,fjy1,fjz1,fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
 +
 +            /* Inner loop uses 243 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
 +                                                 f+i_coord_offset+DIM,fshift+i_shift_offset);
 +
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 19 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W4W4_VF,outeriter*19 + inneriter*243);
 +}
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecCoul_VdwNone_GeomW4W4_F_avx_256_double
 + * Electrostatics interaction: Coulomb
 + * VdW interaction:            None
 + * Geometry:                   Water4-Water4
 + * Calculate force/pot:        Force
 + */
 +void
 +nb_kernel_ElecCoul_VdwNone_GeomW4W4_F_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    real *           vdwioffsetptr3;
 +    __m256d          ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
 +    int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
 +    __m256d          jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
 +    int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
 +    __m256d          jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
 +    int              vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D;
 +    __m256d          jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
 +    __m256d          dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
 +    __m256d          dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
 +    __m256d          dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
 +    __m256d          dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
 +    __m256d          dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
 +    __m256d          dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
 +    __m256d          dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
 +    __m256d          dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
 +    __m256d          dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    iq3              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+3]));
 +
 +    jq1              = _mm256_set1_pd(charge[inr+1]);
 +    jq2              = _mm256_set1_pd(charge[inr+2]);
 +    jq3              = _mm256_set1_pd(charge[inr+3]);
 +    qq11             = _mm256_mul_pd(iq1,jq1);
 +    qq12             = _mm256_mul_pd(iq1,jq2);
 +    qq13             = _mm256_mul_pd(iq1,jq3);
 +    qq21             = _mm256_mul_pd(iq2,jq1);
 +    qq22             = _mm256_mul_pd(iq2,jq2);
 +    qq23             = _mm256_mul_pd(iq2,jq3);
 +    qq31             = _mm256_mul_pd(iq3,jq1);
 +    qq32             = _mm256_mul_pd(iq3,jq2);
 +    qq33             = _mm256_mul_pd(iq3,jq3);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset+DIM,
 +                                                    &ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
 +
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +        fix3             = _mm256_setzero_pd();
 +        fiy3             = _mm256_setzero_pd();
 +        fiz3             = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA+DIM,x+j_coord_offsetB+DIM,
 +                                                 x+j_coord_offsetC+DIM,x+j_coord_offsetD+DIM,
 +                                                 &jx1,&jy1,&jz1,&jx2,&jy2,&jz2,&jx3,&jy3,&jz3);
 +
 +            /* Calculate displacement vector */
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx13             = _mm256_sub_pd(ix1,jx3);
 +            dy13             = _mm256_sub_pd(iy1,jy3);
 +            dz13             = _mm256_sub_pd(iz1,jz3);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +            dx23             = _mm256_sub_pd(ix2,jx3);
 +            dy23             = _mm256_sub_pd(iy2,jy3);
 +            dz23             = _mm256_sub_pd(iz2,jz3);
 +            dx31             = _mm256_sub_pd(ix3,jx1);
 +            dy31             = _mm256_sub_pd(iy3,jy1);
 +            dz31             = _mm256_sub_pd(iz3,jz1);
 +            dx32             = _mm256_sub_pd(ix3,jx2);
 +            dy32             = _mm256_sub_pd(iy3,jy2);
 +            dz32             = _mm256_sub_pd(iz3,jz2);
 +            dx33             = _mm256_sub_pd(ix3,jx3);
 +            dy33             = _mm256_sub_pd(iy3,jy3);
 +            dz33             = _mm256_sub_pd(iz3,jz3);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq13            = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +            rsq23            = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
 +            rsq31            = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
 +            rsq32            = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
 +            rsq33            = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);
 +
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv13           = gmx_mm256_invsqrt_pd(rsq13);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +            rinv23           = gmx_mm256_invsqrt_pd(rsq23);
 +            rinv31           = gmx_mm256_invsqrt_pd(rsq31);
 +            rinv32           = gmx_mm256_invsqrt_pd(rsq32);
 +            rinv33           = gmx_mm256_invsqrt_pd(rsq33);
 +
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq13         = _mm256_mul_pd(rinv13,rinv13);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +            rinvsq23         = _mm256_mul_pd(rinv23,rinv23);
 +            rinvsq31         = _mm256_mul_pd(rinv31,rinv31);
 +            rinvsq32         = _mm256_mul_pd(rinv32,rinv32);
 +            rinvsq33         = _mm256_mul_pd(rinv33,rinv33);
 +
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +            fjx3             = _mm256_setzero_pd();
 +            fjy3             = _mm256_setzero_pd();
 +            fjz3             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq11,rinv11);
 +            felec            = _mm256_mul_pd(velec,rinvsq11);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq12,rinv12);
 +            felec            = _mm256_mul_pd(velec,rinvsq12);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq13,rinv13);
 +            felec            = _mm256_mul_pd(velec,rinvsq13);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx13);
 +            ty               = _mm256_mul_pd(fscal,dy13);
 +            tz               = _mm256_mul_pd(fscal,dz13);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq21,rinv21);
 +            felec            = _mm256_mul_pd(velec,rinvsq21);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq22,rinv22);
 +            felec            = _mm256_mul_pd(velec,rinvsq22);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq23,rinv23);
 +            felec            = _mm256_mul_pd(velec,rinvsq23);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx23);
 +            ty               = _mm256_mul_pd(fscal,dy23);
 +            tz               = _mm256_mul_pd(fscal,dz23);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq31,rinv31);
 +            felec            = _mm256_mul_pd(velec,rinvsq31);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx31);
 +            ty               = _mm256_mul_pd(fscal,dy31);
 +            tz               = _mm256_mul_pd(fscal,dz31);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq32,rinv32);
 +            felec            = _mm256_mul_pd(velec,rinvsq32);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx32);
 +            ty               = _mm256_mul_pd(fscal,dy32);
 +            tz               = _mm256_mul_pd(fscal,dz32);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq33,rinv33);
 +            felec            = _mm256_mul_pd(velec,rinvsq33);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx33);
 +            ty               = _mm256_mul_pd(fscal,dy33);
 +            tz               = _mm256_mul_pd(fscal,dz33);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA+DIM,fjptrB+DIM,fjptrC+DIM,fjptrD+DIM,
 +                                                      fjx1,fjy1,fjz1,fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
 +
 +            /* Inner loop uses 234 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA+DIM,x+j_coord_offsetB+DIM,
 +                                                 x+j_coord_offsetC+DIM,x+j_coord_offsetD+DIM,
 +                                                 &jx1,&jy1,&jz1,&jx2,&jy2,&jz2,&jx3,&jy3,&jz3);
 +
 +            /* Calculate displacement vector */
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx13             = _mm256_sub_pd(ix1,jx3);
 +            dy13             = _mm256_sub_pd(iy1,jy3);
 +            dz13             = _mm256_sub_pd(iz1,jz3);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +            dx23             = _mm256_sub_pd(ix2,jx3);
 +            dy23             = _mm256_sub_pd(iy2,jy3);
 +            dz23             = _mm256_sub_pd(iz2,jz3);
 +            dx31             = _mm256_sub_pd(ix3,jx1);
 +            dy31             = _mm256_sub_pd(iy3,jy1);
 +            dz31             = _mm256_sub_pd(iz3,jz1);
 +            dx32             = _mm256_sub_pd(ix3,jx2);
 +            dy32             = _mm256_sub_pd(iy3,jy2);
 +            dz32             = _mm256_sub_pd(iz3,jz2);
 +            dx33             = _mm256_sub_pd(ix3,jx3);
 +            dy33             = _mm256_sub_pd(iy3,jy3);
 +            dz33             = _mm256_sub_pd(iz3,jz3);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq13            = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +            rsq23            = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
 +            rsq31            = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
 +            rsq32            = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
 +            rsq33            = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);
 +
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv13           = gmx_mm256_invsqrt_pd(rsq13);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +            rinv23           = gmx_mm256_invsqrt_pd(rsq23);
 +            rinv31           = gmx_mm256_invsqrt_pd(rsq31);
 +            rinv32           = gmx_mm256_invsqrt_pd(rsq32);
 +            rinv33           = gmx_mm256_invsqrt_pd(rsq33);
 +
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq13         = _mm256_mul_pd(rinv13,rinv13);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +            rinvsq23         = _mm256_mul_pd(rinv23,rinv23);
 +            rinvsq31         = _mm256_mul_pd(rinv31,rinv31);
 +            rinvsq32         = _mm256_mul_pd(rinv32,rinv32);
 +            rinvsq33         = _mm256_mul_pd(rinv33,rinv33);
 +
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +            fjx3             = _mm256_setzero_pd();
 +            fjy3             = _mm256_setzero_pd();
 +            fjz3             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq11,rinv11);
 +            felec            = _mm256_mul_pd(velec,rinvsq11);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq12,rinv12);
 +            felec            = _mm256_mul_pd(velec,rinvsq12);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq13,rinv13);
 +            felec            = _mm256_mul_pd(velec,rinvsq13);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx13);
 +            ty               = _mm256_mul_pd(fscal,dy13);
 +            tz               = _mm256_mul_pd(fscal,dz13);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq21,rinv21);
 +            felec            = _mm256_mul_pd(velec,rinvsq21);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq22,rinv22);
 +            felec            = _mm256_mul_pd(velec,rinvsq22);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq23,rinv23);
 +            felec            = _mm256_mul_pd(velec,rinvsq23);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx23);
 +            ty               = _mm256_mul_pd(fscal,dy23);
 +            tz               = _mm256_mul_pd(fscal,dz23);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq31,rinv31);
 +            felec            = _mm256_mul_pd(velec,rinvsq31);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx31);
 +            ty               = _mm256_mul_pd(fscal,dy31);
 +            tz               = _mm256_mul_pd(fscal,dz31);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq32,rinv32);
 +            felec            = _mm256_mul_pd(velec,rinvsq32);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx32);
 +            ty               = _mm256_mul_pd(fscal,dy32);
 +            tz               = _mm256_mul_pd(fscal,dz32);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq33,rinv33);
 +            felec            = _mm256_mul_pd(velec,rinvsq33);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx33);
 +            ty               = _mm256_mul_pd(fscal,dy33);
 +            tz               = _mm256_mul_pd(fscal,dz33);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA+DIM,fjptrB+DIM,fjptrC+DIM,fjptrD+DIM,
 +                                                      fjx1,fjy1,fjz1,fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
 +
 +            /* Inner loop uses 234 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
 +                                                 f+i_coord_offset+DIM,fshift+i_shift_offset);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 18 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W4W4_F,outeriter*18 + inneriter*234);
 +}
index 395cb97ba1141e5df7f3e6fecb859bcc414b9542,0000000000000000000000000000000000000000..a93cd0f04bc021ae268b8dabacf9d90c8c9a3641
mode 100644,000000..100644
--- /dev/null
@@@ -1,804 -1,0 +1,804 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/*
 + * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2001-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the Free
 + * Software Foundation; either version 2 of the License, or (at your option) any
 + * later version.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website.
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecEwSh_VdwLJSh_GeomP1P1_VF_avx_256_double
 + * Electrostatics interaction: Ewald
 + * VdW interaction:            LennardJones
 + * Geometry:                   Particle-Particle
 + * Calculate force/pot:        PotentialAndForce
 + */
 +void
 +nb_kernel_ElecEwSh_VdwLJSh_GeomP1P1_VF_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m128i          ewitab;
 +    __m256d          ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
 +    __m256d          beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
 +    real             *ewtab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    sh_ewald         = _mm256_set1_pd(fr->ic->sh_ewald);
 +    beta             = _mm256_set1_pd(fr->ic->ewaldcoeff);
 +    beta2            = _mm256_mul_pd(beta,beta);
 +    beta3            = _mm256_mul_pd(beta,beta2);
 +
 +    ewtab            = fr->ic->tabq_coul_FDV0;
 +    ewtabscale       = _mm256_set1_pd(fr->ic->tabq_scale);
 +    ewtabhalfspace   = _mm256_set1_pd(0.5/fr->ic->tabq_scale);
 +
 +    /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
 +    rcutoff_scalar   = fr->rcoulomb;
 +    rcutoff          = _mm256_set1_pd(rcutoff_scalar);
 +    rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);
 +
 +    sh_vdw_invrcut6  = _mm256_set1_pd(fr->ic->sh_invrc6);
 +    rvdw             = _mm256_set1_pd(fr->rvdw);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_1rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +
 +        /* Load parameters for i particles */
 +        iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +        vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +        /* Reset potential sums */
 +        velecsum         = _mm256_setzero_pd();
 +        vvdwsum          = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r00,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_sub_pd(rinv00,sh_ewald),velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd(_mm256_mul_pd( _mm256_sub_pd(vvdw12 , _mm256_mul_pd(c12_00,_mm256_mul_pd(sh_vdw_invrcut6,sh_vdw_invrcut6))), one_twelfth) ,
 +                                          _mm256_mul_pd( _mm256_sub_pd(vvdw6,_mm256_mul_pd(c6_00,sh_vdw_invrcut6)),one_sixth));
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +            vvdw             = _mm256_and_pd(vvdw,cutoff_mask);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
 +
 +            }
 +
 +            /* Inner loop uses 64 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r00,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_sub_pd(rinv00,sh_ewald),velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd(_mm256_mul_pd( _mm256_sub_pd(vvdw12 , _mm256_mul_pd(c12_00,_mm256_mul_pd(sh_vdw_invrcut6,sh_vdw_invrcut6))), one_twelfth) ,
 +                                          _mm256_mul_pd( _mm256_sub_pd(vvdw6,_mm256_mul_pd(c6_00,sh_vdw_invrcut6)),one_sixth));
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +            vvdw             = _mm256_and_pd(vvdw,cutoff_mask);
 +            vvdw             = _mm256_andnot_pd(dummy_mask,vvdw);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
 +
 +            }
 +
 +            /* Inner loop uses 65 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_1atom_swizzle_pd(fix0,fiy0,fiz0,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
 +        gmx_mm256_update_1pot_pd(vvdwsum,kernel_data->energygrp_vdw+ggid);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 9 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_VF,outeriter*9 + inneriter*65);
 +}
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecEwSh_VdwLJSh_GeomP1P1_F_avx_256_double
 + * Electrostatics interaction: Ewald
 + * VdW interaction:            LennardJones
 + * Geometry:                   Particle-Particle
 + * Calculate force/pot:        Force
 + */
 +void
 +nb_kernel_ElecEwSh_VdwLJSh_GeomP1P1_F_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m128i          ewitab;
 +    __m256d          ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
 +    __m256d          beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
 +    real             *ewtab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    sh_ewald         = _mm256_set1_pd(fr->ic->sh_ewald);
 +    beta             = _mm256_set1_pd(fr->ic->ewaldcoeff);
 +    beta2            = _mm256_mul_pd(beta,beta);
 +    beta3            = _mm256_mul_pd(beta,beta2);
 +
 +    ewtab            = fr->ic->tabq_coul_F;
 +    ewtabscale       = _mm256_set1_pd(fr->ic->tabq_scale);
 +    ewtabhalfspace   = _mm256_set1_pd(0.5/fr->ic->tabq_scale);
 +
 +    /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
 +    rcutoff_scalar   = fr->rcoulomb;
 +    rcutoff          = _mm256_set1_pd(rcutoff_scalar);
 +    rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);
 +
 +    sh_vdw_invrcut6  = _mm256_set1_pd(fr->ic->sh_invrc6);
 +    rvdw             = _mm256_set1_pd(fr->rvdw);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_1rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +
 +        /* Load parameters for i particles */
 +        iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +        vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r00,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(_mm256_mul_pd(c12_00,rinvsix),c6_00),_mm256_mul_pd(rinvsix,rinvsq00));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
 +
 +            }
 +
 +            /* Inner loop uses 46 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r00,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(_mm256_mul_pd(c12_00,rinvsix),c6_00),_mm256_mul_pd(rinvsix,rinvsq00));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
 +
 +            }
 +
 +            /* Inner loop uses 47 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_1atom_swizzle_pd(fix0,fiy0,fiz0,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 7 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_F,outeriter*7 + inneriter*47);
 +}
index 8335c885fe7dd3bb166b7ad3a13b43a228b36e60,0000000000000000000000000000000000000000..84f79ddcbc4b28d146f4a94358529087cb006df3
mode 100644,000000..100644
--- /dev/null
@@@ -1,1338 -1,0 +1,1338 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/*
 + * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2001-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the Free
 + * Software Foundation; either version 2 of the License, or (at your option) any
 + * later version.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website.
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecEwSh_VdwLJSh_GeomW3P1_VF_avx_256_double
 + * Electrostatics interaction: Ewald
 + * VdW interaction:            LennardJones
 + * Geometry:                   Water3-Particle
 + * Calculate force/pot:        PotentialAndForce
 + */
 +void
 +nb_kernel_ElecEwSh_VdwLJSh_GeomW3P1_VF_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m128i          ewitab;
 +    __m256d          ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
 +    __m256d          beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
 +    real             *ewtab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    sh_ewald         = _mm256_set1_pd(fr->ic->sh_ewald);
 +    beta             = _mm256_set1_pd(fr->ic->ewaldcoeff);
 +    beta2            = _mm256_mul_pd(beta,beta);
 +    beta3            = _mm256_mul_pd(beta,beta2);
 +
 +    ewtab            = fr->ic->tabq_coul_FDV0;
 +    ewtabscale       = _mm256_set1_pd(fr->ic->tabq_scale);
 +    ewtabhalfspace   = _mm256_set1_pd(0.5/fr->ic->tabq_scale);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +    /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
 +    rcutoff_scalar   = fr->rcoulomb;
 +    rcutoff          = _mm256_set1_pd(rcutoff_scalar);
 +    rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);
 +
 +    sh_vdw_invrcut6  = _mm256_set1_pd(fr->ic->sh_invrc6);
 +    rvdw             = _mm256_set1_pd(fr->rvdw);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +
 +        /* Reset potential sums */
 +        velecsum         = _mm256_setzero_pd();
 +        vvdwsum          = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r00,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_sub_pd(rinv00,sh_ewald),velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd(_mm256_mul_pd( _mm256_sub_pd(vvdw12 , _mm256_mul_pd(c12_00,_mm256_mul_pd(sh_vdw_invrcut6,sh_vdw_invrcut6))), one_twelfth) ,
 +                                          _mm256_mul_pd( _mm256_sub_pd(vvdw6,_mm256_mul_pd(c6_00,sh_vdw_invrcut6)),one_sixth));
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +            vvdw             = _mm256_and_pd(vvdw,cutoff_mask);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq10,rcutoff2))
 +            {
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r10,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_sub_pd(rinv10,sh_ewald),velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq20,rcutoff2))
 +            {
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r20,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_sub_pd(rinv20,sh_ewald),velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 159 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r00,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_sub_pd(rinv00,sh_ewald),velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd(_mm256_mul_pd( _mm256_sub_pd(vvdw12 , _mm256_mul_pd(c12_00,_mm256_mul_pd(sh_vdw_invrcut6,sh_vdw_invrcut6))), one_twelfth) ,
 +                                          _mm256_mul_pd( _mm256_sub_pd(vvdw6,_mm256_mul_pd(c6_00,sh_vdw_invrcut6)),one_sixth));
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +            vvdw             = _mm256_and_pd(vvdw,cutoff_mask);
 +            vvdw             = _mm256_andnot_pd(dummy_mask,vvdw);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq10,rcutoff2))
 +            {
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +            r10              = _mm256_andnot_pd(dummy_mask,r10);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r10,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_sub_pd(rinv10,sh_ewald),velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq20,rcutoff2))
 +            {
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +            r20              = _mm256_andnot_pd(dummy_mask,r20);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r20,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_sub_pd(rinv20,sh_ewald),velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 162 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
 +        gmx_mm256_update_1pot_pd(vvdwsum,kernel_data->energygrp_vdw+ggid);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 20 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3_VF,outeriter*20 + inneriter*162);
 +}
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecEwSh_VdwLJSh_GeomW3P1_F_avx_256_double
 + * Electrostatics interaction: Ewald
 + * VdW interaction:            LennardJones
 + * Geometry:                   Water3-Particle
 + * Calculate force/pot:        Force
 + */
 +void
 +nb_kernel_ElecEwSh_VdwLJSh_GeomW3P1_F_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m128i          ewitab;
 +    __m256d          ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
 +    __m256d          beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
 +    real             *ewtab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    sh_ewald         = _mm256_set1_pd(fr->ic->sh_ewald);
 +    beta             = _mm256_set1_pd(fr->ic->ewaldcoeff);
 +    beta2            = _mm256_mul_pd(beta,beta);
 +    beta3            = _mm256_mul_pd(beta,beta2);
 +
 +    ewtab            = fr->ic->tabq_coul_F;
 +    ewtabscale       = _mm256_set1_pd(fr->ic->tabq_scale);
 +    ewtabhalfspace   = _mm256_set1_pd(0.5/fr->ic->tabq_scale);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +    /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
 +    rcutoff_scalar   = fr->rcoulomb;
 +    rcutoff          = _mm256_set1_pd(rcutoff_scalar);
 +    rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);
 +
 +    sh_vdw_invrcut6  = _mm256_set1_pd(fr->ic->sh_invrc6);
 +    rvdw             = _mm256_set1_pd(fr->rvdw);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r00,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(_mm256_mul_pd(c12_00,rinvsix),c6_00),_mm256_mul_pd(rinvsix,rinvsq00));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq10,rcutoff2))
 +            {
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r10,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq20,rcutoff2))
 +            {
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r20,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 127 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r00,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(_mm256_mul_pd(c12_00,rinvsix),c6_00),_mm256_mul_pd(rinvsix,rinvsq00));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq10,rcutoff2))
 +            {
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +            r10              = _mm256_andnot_pd(dummy_mask,r10);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r10,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq20,rcutoff2))
 +            {
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +            r20              = _mm256_andnot_pd(dummy_mask,r20);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r20,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 130 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 18 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3_F,outeriter*18 + inneriter*130);
 +}
index 2744788517163d18cef42d70fe77c0924f593d9b,0000000000000000000000000000000000000000..5c397f9057961d2039368f18ee1212457f5fa6be
mode 100644,000000..100644
--- /dev/null
@@@ -1,2656 -1,0 +1,2656 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/*
 + * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2001-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the Free
 + * Software Foundation; either version 2 of the License, or (at your option) any
 + * later version.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website.
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecEwSh_VdwLJSh_GeomW3W3_VF_avx_256_double
 + * Electrostatics interaction: Ewald
 + * VdW interaction:            LennardJones
 + * Geometry:                   Water3-Water3
 + * Calculate force/pot:        PotentialAndForce
 + */
 +void
 +nb_kernel_ElecEwSh_VdwLJSh_GeomW3W3_VF_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
 +    __m256d          jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
 +    int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
 +    __m256d          jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
 +    __m256d          dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
 +    __m256d          dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
 +    __m256d          dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m128i          ewitab;
 +    __m256d          ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
 +    __m256d          beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
 +    real             *ewtab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    sh_ewald         = _mm256_set1_pd(fr->ic->sh_ewald);
 +    beta             = _mm256_set1_pd(fr->ic->ewaldcoeff);
 +    beta2            = _mm256_mul_pd(beta,beta);
 +    beta3            = _mm256_mul_pd(beta,beta2);
 +
 +    ewtab            = fr->ic->tabq_coul_FDV0;
 +    ewtabscale       = _mm256_set1_pd(fr->ic->tabq_scale);
 +    ewtabhalfspace   = _mm256_set1_pd(0.5/fr->ic->tabq_scale);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +    jq0              = _mm256_set1_pd(charge[inr+0]);
 +    jq1              = _mm256_set1_pd(charge[inr+1]);
 +    jq2              = _mm256_set1_pd(charge[inr+2]);
 +    vdwjidx0A        = 2*vdwtype[inr+0];
 +    qq00             = _mm256_mul_pd(iq0,jq0);
 +    c6_00            = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A]);
 +    c12_00           = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A+1]);
 +    qq01             = _mm256_mul_pd(iq0,jq1);
 +    qq02             = _mm256_mul_pd(iq0,jq2);
 +    qq10             = _mm256_mul_pd(iq1,jq0);
 +    qq11             = _mm256_mul_pd(iq1,jq1);
 +    qq12             = _mm256_mul_pd(iq1,jq2);
 +    qq20             = _mm256_mul_pd(iq2,jq0);
 +    qq21             = _mm256_mul_pd(iq2,jq1);
 +    qq22             = _mm256_mul_pd(iq2,jq2);
 +
 +    /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
 +    rcutoff_scalar   = fr->rcoulomb;
 +    rcutoff          = _mm256_set1_pd(rcutoff_scalar);
 +    rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);
 +
 +    sh_vdw_invrcut6  = _mm256_set1_pd(fr->ic->sh_invrc6);
 +    rvdw             = _mm256_set1_pd(fr->rvdw);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +
 +        /* Reset potential sums */
 +        velecsum         = _mm256_setzero_pd();
 +        vvdwsum          = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                              &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx01             = _mm256_sub_pd(ix0,jx1);
 +            dy01             = _mm256_sub_pd(iy0,jy1);
 +            dz01             = _mm256_sub_pd(iz0,jz1);
 +            dx02             = _mm256_sub_pd(ix0,jx2);
 +            dy02             = _mm256_sub_pd(iy0,jy2);
 +            dz02             = _mm256_sub_pd(iz0,jz2);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
 +            rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv01           = gmx_mm256_invsqrt_pd(rsq01);
 +            rinv02           = gmx_mm256_invsqrt_pd(rsq02);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq01         = _mm256_mul_pd(rinv01,rinv01);
 +            rinvsq02         = _mm256_mul_pd(rinv02,rinv02);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r00,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_sub_pd(rinv00,sh_ewald),velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd(_mm256_mul_pd( _mm256_sub_pd(vvdw12 , _mm256_mul_pd(c12_00,_mm256_mul_pd(sh_vdw_invrcut6,sh_vdw_invrcut6))), one_twelfth) ,
 +                                          _mm256_mul_pd( _mm256_sub_pd(vvdw6,_mm256_mul_pd(c6_00,sh_vdw_invrcut6)),one_sixth));
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +            vvdw             = _mm256_and_pd(vvdw,cutoff_mask);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq01,rcutoff2))
 +            {
 +
 +            r01              = _mm256_mul_pd(rsq01,rinv01);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r01,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq01,_mm256_sub_pd(_mm256_sub_pd(rinv01,sh_ewald),velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq01,rinv01),_mm256_sub_pd(rinvsq01,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq01,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx01);
 +            ty               = _mm256_mul_pd(fscal,dy01);
 +            tz               = _mm256_mul_pd(fscal,dz01);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq02,rcutoff2))
 +            {
 +
 +            r02              = _mm256_mul_pd(rsq02,rinv02);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r02,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq02,_mm256_sub_pd(_mm256_sub_pd(rinv02,sh_ewald),velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq02,rinv02),_mm256_sub_pd(rinvsq02,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq02,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx02);
 +            ty               = _mm256_mul_pd(fscal,dy02);
 +            tz               = _mm256_mul_pd(fscal,dz02);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq10,rcutoff2))
 +            {
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r10,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_sub_pd(rinv10,sh_ewald),velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq11,rcutoff2))
 +            {
 +
 +            r11              = _mm256_mul_pd(rsq11,rinv11);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r11,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_sub_pd(rinv11,sh_ewald),velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq11,rinv11),_mm256_sub_pd(rinvsq11,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq12,rcutoff2))
 +            {
 +
 +            r12              = _mm256_mul_pd(rsq12,rinv12);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r12,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_sub_pd(rinv12,sh_ewald),velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq12,rinv12),_mm256_sub_pd(rinvsq12,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq20,rcutoff2))
 +            {
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r20,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_sub_pd(rinv20,sh_ewald),velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq21,rcutoff2))
 +            {
 +
 +            r21              = _mm256_mul_pd(rsq21,rinv21);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r21,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_sub_pd(rinv21,sh_ewald),velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq21,rinv21),_mm256_sub_pd(rinvsq21,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq22,rcutoff2))
 +            {
 +
 +            r22              = _mm256_mul_pd(rsq22,rinv22);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r22,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_sub_pd(rinv22,sh_ewald),velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq22,rinv22),_mm256_sub_pd(rinvsq22,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
 +
 +            /* Inner loop uses 432 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                              &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx01             = _mm256_sub_pd(ix0,jx1);
 +            dy01             = _mm256_sub_pd(iy0,jy1);
 +            dz01             = _mm256_sub_pd(iz0,jz1);
 +            dx02             = _mm256_sub_pd(ix0,jx2);
 +            dy02             = _mm256_sub_pd(iy0,jy2);
 +            dz02             = _mm256_sub_pd(iz0,jz2);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
 +            rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv01           = gmx_mm256_invsqrt_pd(rsq01);
 +            rinv02           = gmx_mm256_invsqrt_pd(rsq02);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq01         = _mm256_mul_pd(rinv01,rinv01);
 +            rinvsq02         = _mm256_mul_pd(rinv02,rinv02);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r00,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_sub_pd(rinv00,sh_ewald),velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd(_mm256_mul_pd( _mm256_sub_pd(vvdw12 , _mm256_mul_pd(c12_00,_mm256_mul_pd(sh_vdw_invrcut6,sh_vdw_invrcut6))), one_twelfth) ,
 +                                          _mm256_mul_pd( _mm256_sub_pd(vvdw6,_mm256_mul_pd(c6_00,sh_vdw_invrcut6)),one_sixth));
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +            vvdw             = _mm256_and_pd(vvdw,cutoff_mask);
 +            vvdw             = _mm256_andnot_pd(dummy_mask,vvdw);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq01,rcutoff2))
 +            {
 +
 +            r01              = _mm256_mul_pd(rsq01,rinv01);
 +            r01              = _mm256_andnot_pd(dummy_mask,r01);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r01,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq01,_mm256_sub_pd(_mm256_sub_pd(rinv01,sh_ewald),velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq01,rinv01),_mm256_sub_pd(rinvsq01,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq01,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx01);
 +            ty               = _mm256_mul_pd(fscal,dy01);
 +            tz               = _mm256_mul_pd(fscal,dz01);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq02,rcutoff2))
 +            {
 +
 +            r02              = _mm256_mul_pd(rsq02,rinv02);
 +            r02              = _mm256_andnot_pd(dummy_mask,r02);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r02,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq02,_mm256_sub_pd(_mm256_sub_pd(rinv02,sh_ewald),velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq02,rinv02),_mm256_sub_pd(rinvsq02,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq02,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx02);
 +            ty               = _mm256_mul_pd(fscal,dy02);
 +            tz               = _mm256_mul_pd(fscal,dz02);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq10,rcutoff2))
 +            {
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +            r10              = _mm256_andnot_pd(dummy_mask,r10);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r10,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_sub_pd(rinv10,sh_ewald),velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq11,rcutoff2))
 +            {
 +
 +            r11              = _mm256_mul_pd(rsq11,rinv11);
 +            r11              = _mm256_andnot_pd(dummy_mask,r11);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r11,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_sub_pd(rinv11,sh_ewald),velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq11,rinv11),_mm256_sub_pd(rinvsq11,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq12,rcutoff2))
 +            {
 +
 +            r12              = _mm256_mul_pd(rsq12,rinv12);
 +            r12              = _mm256_andnot_pd(dummy_mask,r12);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r12,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_sub_pd(rinv12,sh_ewald),velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq12,rinv12),_mm256_sub_pd(rinvsq12,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq20,rcutoff2))
 +            {
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +            r20              = _mm256_andnot_pd(dummy_mask,r20);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r20,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_sub_pd(rinv20,sh_ewald),velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq21,rcutoff2))
 +            {
 +
 +            r21              = _mm256_mul_pd(rsq21,rinv21);
 +            r21              = _mm256_andnot_pd(dummy_mask,r21);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r21,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_sub_pd(rinv21,sh_ewald),velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq21,rinv21),_mm256_sub_pd(rinvsq21,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq22,rcutoff2))
 +            {
 +
 +            r22              = _mm256_mul_pd(rsq22,rinv22);
 +            r22              = _mm256_andnot_pd(dummy_mask,r22);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r22,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_sub_pd(rinv22,sh_ewald),velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq22,rinv22),_mm256_sub_pd(rinvsq22,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
 +
 +            /* Inner loop uses 441 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
 +        gmx_mm256_update_1pot_pd(vvdwsum,kernel_data->energygrp_vdw+ggid);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 20 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3W3_VF,outeriter*20 + inneriter*441);
 +}
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecEwSh_VdwLJSh_GeomW3W3_F_avx_256_double
 + * Electrostatics interaction: Ewald
 + * VdW interaction:            LennardJones
 + * Geometry:                   Water3-Water3
 + * Calculate force/pot:        Force
 + */
 +void
 +nb_kernel_ElecEwSh_VdwLJSh_GeomW3W3_F_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
 +    __m256d          jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
 +    int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
 +    __m256d          jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
 +    __m256d          dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
 +    __m256d          dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
 +    __m256d          dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m128i          ewitab;
 +    __m256d          ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
 +    __m256d          beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
 +    real             *ewtab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    sh_ewald         = _mm256_set1_pd(fr->ic->sh_ewald);
 +    beta             = _mm256_set1_pd(fr->ic->ewaldcoeff);
 +    beta2            = _mm256_mul_pd(beta,beta);
 +    beta3            = _mm256_mul_pd(beta,beta2);
 +
 +    ewtab            = fr->ic->tabq_coul_F;
 +    ewtabscale       = _mm256_set1_pd(fr->ic->tabq_scale);
 +    ewtabhalfspace   = _mm256_set1_pd(0.5/fr->ic->tabq_scale);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +    jq0              = _mm256_set1_pd(charge[inr+0]);
 +    jq1              = _mm256_set1_pd(charge[inr+1]);
 +    jq2              = _mm256_set1_pd(charge[inr+2]);
 +    vdwjidx0A        = 2*vdwtype[inr+0];
 +    qq00             = _mm256_mul_pd(iq0,jq0);
 +    c6_00            = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A]);
 +    c12_00           = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A+1]);
 +    qq01             = _mm256_mul_pd(iq0,jq1);
 +    qq02             = _mm256_mul_pd(iq0,jq2);
 +    qq10             = _mm256_mul_pd(iq1,jq0);
 +    qq11             = _mm256_mul_pd(iq1,jq1);
 +    qq12             = _mm256_mul_pd(iq1,jq2);
 +    qq20             = _mm256_mul_pd(iq2,jq0);
 +    qq21             = _mm256_mul_pd(iq2,jq1);
 +    qq22             = _mm256_mul_pd(iq2,jq2);
 +
 +    /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
 +    rcutoff_scalar   = fr->rcoulomb;
 +    rcutoff          = _mm256_set1_pd(rcutoff_scalar);
 +    rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);
 +
 +    sh_vdw_invrcut6  = _mm256_set1_pd(fr->ic->sh_invrc6);
 +    rvdw             = _mm256_set1_pd(fr->rvdw);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                              &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx01             = _mm256_sub_pd(ix0,jx1);
 +            dy01             = _mm256_sub_pd(iy0,jy1);
 +            dz01             = _mm256_sub_pd(iz0,jz1);
 +            dx02             = _mm256_sub_pd(ix0,jx2);
 +            dy02             = _mm256_sub_pd(iy0,jy2);
 +            dz02             = _mm256_sub_pd(iz0,jz2);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
 +            rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv01           = gmx_mm256_invsqrt_pd(rsq01);
 +            rinv02           = gmx_mm256_invsqrt_pd(rsq02);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq01         = _mm256_mul_pd(rinv01,rinv01);
 +            rinvsq02         = _mm256_mul_pd(rinv02,rinv02);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r00,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(_mm256_mul_pd(c12_00,rinvsix),c6_00),_mm256_mul_pd(rinvsix,rinvsq00));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq01,rcutoff2))
 +            {
 +
 +            r01              = _mm256_mul_pd(rsq01,rinv01);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r01,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq01,rinv01),_mm256_sub_pd(rinvsq01,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq01,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx01);
 +            ty               = _mm256_mul_pd(fscal,dy01);
 +            tz               = _mm256_mul_pd(fscal,dz01);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq02,rcutoff2))
 +            {
 +
 +            r02              = _mm256_mul_pd(rsq02,rinv02);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r02,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq02,rinv02),_mm256_sub_pd(rinvsq02,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq02,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx02);
 +            ty               = _mm256_mul_pd(fscal,dy02);
 +            tz               = _mm256_mul_pd(fscal,dz02);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq10,rcutoff2))
 +            {
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r10,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq11,rcutoff2))
 +            {
 +
 +            r11              = _mm256_mul_pd(rsq11,rinv11);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r11,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq11,rinv11),_mm256_sub_pd(rinvsq11,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq12,rcutoff2))
 +            {
 +
 +            r12              = _mm256_mul_pd(rsq12,rinv12);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r12,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq12,rinv12),_mm256_sub_pd(rinvsq12,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq20,rcutoff2))
 +            {
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r20,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq21,rcutoff2))
 +            {
 +
 +            r21              = _mm256_mul_pd(rsq21,rinv21);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r21,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq21,rinv21),_mm256_sub_pd(rinvsq21,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq22,rcutoff2))
 +            {
 +
 +            r22              = _mm256_mul_pd(rsq22,rinv22);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r22,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq22,rinv22),_mm256_sub_pd(rinvsq22,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
 +
 +            /* Inner loop uses 358 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                              &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx01             = _mm256_sub_pd(ix0,jx1);
 +            dy01             = _mm256_sub_pd(iy0,jy1);
 +            dz01             = _mm256_sub_pd(iz0,jz1);
 +            dx02             = _mm256_sub_pd(ix0,jx2);
 +            dy02             = _mm256_sub_pd(iy0,jy2);
 +            dz02             = _mm256_sub_pd(iz0,jz2);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
 +            rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv01           = gmx_mm256_invsqrt_pd(rsq01);
 +            rinv02           = gmx_mm256_invsqrt_pd(rsq02);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq01         = _mm256_mul_pd(rinv01,rinv01);
 +            rinvsq02         = _mm256_mul_pd(rinv02,rinv02);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r00,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(_mm256_mul_pd(c12_00,rinvsix),c6_00),_mm256_mul_pd(rinvsix,rinvsq00));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq01,rcutoff2))
 +            {
 +
 +            r01              = _mm256_mul_pd(rsq01,rinv01);
 +            r01              = _mm256_andnot_pd(dummy_mask,r01);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r01,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq01,rinv01),_mm256_sub_pd(rinvsq01,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq01,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx01);
 +            ty               = _mm256_mul_pd(fscal,dy01);
 +            tz               = _mm256_mul_pd(fscal,dz01);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq02,rcutoff2))
 +            {
 +
 +            r02              = _mm256_mul_pd(rsq02,rinv02);
 +            r02              = _mm256_andnot_pd(dummy_mask,r02);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r02,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq02,rinv02),_mm256_sub_pd(rinvsq02,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq02,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx02);
 +            ty               = _mm256_mul_pd(fscal,dy02);
 +            tz               = _mm256_mul_pd(fscal,dz02);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq10,rcutoff2))
 +            {
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +            r10              = _mm256_andnot_pd(dummy_mask,r10);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r10,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq11,rcutoff2))
 +            {
 +
 +            r11              = _mm256_mul_pd(rsq11,rinv11);
 +            r11              = _mm256_andnot_pd(dummy_mask,r11);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r11,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq11,rinv11),_mm256_sub_pd(rinvsq11,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq12,rcutoff2))
 +            {
 +
 +            r12              = _mm256_mul_pd(rsq12,rinv12);
 +            r12              = _mm256_andnot_pd(dummy_mask,r12);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r12,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq12,rinv12),_mm256_sub_pd(rinvsq12,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq20,rcutoff2))
 +            {
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +            r20              = _mm256_andnot_pd(dummy_mask,r20);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r20,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq21,rcutoff2))
 +            {
 +
 +            r21              = _mm256_mul_pd(rsq21,rinv21);
 +            r21              = _mm256_andnot_pd(dummy_mask,r21);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r21,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq21,rinv21),_mm256_sub_pd(rinvsq21,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq22,rcutoff2))
 +            {
 +
 +            r22              = _mm256_mul_pd(rsq22,rinv22);
 +            r22              = _mm256_andnot_pd(dummy_mask,r22);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r22,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq22,rinv22),_mm256_sub_pd(rinvsq22,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
 +
 +            /* Inner loop uses 367 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 18 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3W3_F,outeriter*18 + inneriter*367);
 +}
index bc124ee30cfbb8cd99dcd840b63b981d53c43ad2,0000000000000000000000000000000000000000..2ada318bd1f2741cbc35aee35588fd3333344434
mode 100644,000000..100644
--- /dev/null
@@@ -1,1502 -1,0 +1,1502 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/*
 + * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2001-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the Free
 + * Software Foundation; either version 2 of the License, or (at your option) any
 + * later version.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website.
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecEwSh_VdwLJSh_GeomW4P1_VF_avx_256_double
 + * Electrostatics interaction: Ewald
 + * VdW interaction:            LennardJones
 + * Geometry:                   Water4-Particle
 + * Calculate force/pot:        PotentialAndForce
 + */
 +void
 +nb_kernel_ElecEwSh_VdwLJSh_GeomW4P1_VF_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    real *           vdwioffsetptr3;
 +    __m256d          ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          dx30,dy30,dz30,rsq30,rinv30,rinvsq30,r30,qq30,c6_30,c12_30;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m128i          ewitab;
 +    __m256d          ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
 +    __m256d          beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
 +    real             *ewtab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    sh_ewald         = _mm256_set1_pd(fr->ic->sh_ewald);
 +    beta             = _mm256_set1_pd(fr->ic->ewaldcoeff);
 +    beta2            = _mm256_mul_pd(beta,beta);
 +    beta3            = _mm256_mul_pd(beta,beta2);
 +
 +    ewtab            = fr->ic->tabq_coul_FDV0;
 +    ewtabscale       = _mm256_set1_pd(fr->ic->tabq_scale);
 +    ewtabhalfspace   = _mm256_set1_pd(0.5/fr->ic->tabq_scale);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    iq3              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+3]));
 +    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +    /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
 +    rcutoff_scalar   = fr->rcoulomb;
 +    rcutoff          = _mm256_set1_pd(rcutoff_scalar);
 +    rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);
 +
 +    sh_vdw_invrcut6  = _mm256_set1_pd(fr->ic->sh_invrc6);
 +    rvdw             = _mm256_set1_pd(fr->rvdw);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_4rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +        fix3             = _mm256_setzero_pd();
 +        fiy3             = _mm256_setzero_pd();
 +        fiz3             = _mm256_setzero_pd();
 +
 +        /* Reset potential sums */
 +        velecsum         = _mm256_setzero_pd();
 +        vvdwsum          = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx30             = _mm256_sub_pd(ix3,jx0);
 +            dy30             = _mm256_sub_pd(iy3,jy0);
 +            dz30             = _mm256_sub_pd(iz3,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq30            = gmx_mm256_calc_rsq_pd(dx30,dy30,dz30);
 +
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv30           = gmx_mm256_invsqrt_pd(rsq30);
 +
 +            rinvsq00         = gmx_mm256_inv_pd(rsq00);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq30         = _mm256_mul_pd(rinv30,rinv30);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd(_mm256_mul_pd( _mm256_sub_pd(vvdw12 , _mm256_mul_pd(c12_00,_mm256_mul_pd(sh_vdw_invrcut6,sh_vdw_invrcut6))), one_twelfth) ,
 +                                          _mm256_mul_pd( _mm256_sub_pd(vvdw6,_mm256_mul_pd(c6_00,sh_vdw_invrcut6)),one_sixth));
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            vvdw             = _mm256_and_pd(vvdw,cutoff_mask);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = fvdw;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq10,rcutoff2))
 +            {
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r10,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_sub_pd(rinv10,sh_ewald),velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq20,rcutoff2))
 +            {
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r20,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_sub_pd(rinv20,sh_ewald),velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq30,rcutoff2))
 +            {
 +
 +            r30              = _mm256_mul_pd(rsq30,rinv30);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq30             = _mm256_mul_pd(iq3,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r30,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq30,_mm256_sub_pd(_mm256_sub_pd(rinv30,sh_ewald),velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq30,rinv30),_mm256_sub_pd(rinvsq30,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq30,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx30);
 +            ty               = _mm256_mul_pd(fscal,dy30);
 +            tz               = _mm256_mul_pd(fscal,dz30);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 182 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx30             = _mm256_sub_pd(ix3,jx0);
 +            dy30             = _mm256_sub_pd(iy3,jy0);
 +            dz30             = _mm256_sub_pd(iz3,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq30            = gmx_mm256_calc_rsq_pd(dx30,dy30,dz30);
 +
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv30           = gmx_mm256_invsqrt_pd(rsq30);
 +
 +            rinvsq00         = gmx_mm256_inv_pd(rsq00);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq30         = _mm256_mul_pd(rinv30,rinv30);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd(_mm256_mul_pd( _mm256_sub_pd(vvdw12 , _mm256_mul_pd(c12_00,_mm256_mul_pd(sh_vdw_invrcut6,sh_vdw_invrcut6))), one_twelfth) ,
 +                                          _mm256_mul_pd( _mm256_sub_pd(vvdw6,_mm256_mul_pd(c6_00,sh_vdw_invrcut6)),one_sixth));
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            vvdw             = _mm256_and_pd(vvdw,cutoff_mask);
 +            vvdw             = _mm256_andnot_pd(dummy_mask,vvdw);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = fvdw;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq10,rcutoff2))
 +            {
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +            r10              = _mm256_andnot_pd(dummy_mask,r10);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r10,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_sub_pd(rinv10,sh_ewald),velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq20,rcutoff2))
 +            {
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +            r20              = _mm256_andnot_pd(dummy_mask,r20);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r20,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_sub_pd(rinv20,sh_ewald),velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq30,rcutoff2))
 +            {
 +
 +            r30              = _mm256_mul_pd(rsq30,rinv30);
 +            r30              = _mm256_andnot_pd(dummy_mask,r30);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq30             = _mm256_mul_pd(iq3,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r30,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq30,_mm256_sub_pd(_mm256_sub_pd(rinv30,sh_ewald),velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq30,rinv30),_mm256_sub_pd(rinvsq30,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq30,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx30);
 +            ty               = _mm256_mul_pd(fscal,dy30);
 +            tz               = _mm256_mul_pd(fscal,dz30);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 185 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_4atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
 +        gmx_mm256_update_1pot_pd(vvdwsum,kernel_data->energygrp_vdw+ggid);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 26 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4_VF,outeriter*26 + inneriter*185);
 +}
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecEwSh_VdwLJSh_GeomW4P1_F_avx_256_double
 + * Electrostatics interaction: Ewald
 + * VdW interaction:            LennardJones
 + * Geometry:                   Water4-Particle
 + * Calculate force/pot:        Force
 + */
 +void
 +nb_kernel_ElecEwSh_VdwLJSh_GeomW4P1_F_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    real *           vdwioffsetptr3;
 +    __m256d          ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          dx30,dy30,dz30,rsq30,rinv30,rinvsq30,r30,qq30,c6_30,c12_30;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m128i          ewitab;
 +    __m256d          ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
 +    __m256d          beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
 +    real             *ewtab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    sh_ewald         = _mm256_set1_pd(fr->ic->sh_ewald);
 +    beta             = _mm256_set1_pd(fr->ic->ewaldcoeff);
 +    beta2            = _mm256_mul_pd(beta,beta);
 +    beta3            = _mm256_mul_pd(beta,beta2);
 +
 +    ewtab            = fr->ic->tabq_coul_F;
 +    ewtabscale       = _mm256_set1_pd(fr->ic->tabq_scale);
 +    ewtabhalfspace   = _mm256_set1_pd(0.5/fr->ic->tabq_scale);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    iq3              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+3]));
 +    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +    /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
 +    rcutoff_scalar   = fr->rcoulomb;
 +    rcutoff          = _mm256_set1_pd(rcutoff_scalar);
 +    rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);
 +
 +    sh_vdw_invrcut6  = _mm256_set1_pd(fr->ic->sh_invrc6);
 +    rvdw             = _mm256_set1_pd(fr->rvdw);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_4rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +        fix3             = _mm256_setzero_pd();
 +        fiy3             = _mm256_setzero_pd();
 +        fiz3             = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx30             = _mm256_sub_pd(ix3,jx0);
 +            dy30             = _mm256_sub_pd(iy3,jy0);
 +            dz30             = _mm256_sub_pd(iz3,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq30            = gmx_mm256_calc_rsq_pd(dx30,dy30,dz30);
 +
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv30           = gmx_mm256_invsqrt_pd(rsq30);
 +
 +            rinvsq00         = gmx_mm256_inv_pd(rsq00);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq30         = _mm256_mul_pd(rinv30,rinv30);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(_mm256_mul_pd(c12_00,rinvsix),c6_00),_mm256_mul_pd(rinvsix,rinvsq00));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = fvdw;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq10,rcutoff2))
 +            {
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r10,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq20,rcutoff2))
 +            {
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r20,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq30,rcutoff2))
 +            {
 +
 +            r30              = _mm256_mul_pd(rsq30,rinv30);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq30             = _mm256_mul_pd(iq3,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r30,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq30,rinv30),_mm256_sub_pd(rinvsq30,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq30,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx30);
 +            ty               = _mm256_mul_pd(fscal,dy30);
 +            tz               = _mm256_mul_pd(fscal,dz30);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 150 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx30             = _mm256_sub_pd(ix3,jx0);
 +            dy30             = _mm256_sub_pd(iy3,jy0);
 +            dz30             = _mm256_sub_pd(iz3,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq30            = gmx_mm256_calc_rsq_pd(dx30,dy30,dz30);
 +
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv30           = gmx_mm256_invsqrt_pd(rsq30);
 +
 +            rinvsq00         = gmx_mm256_inv_pd(rsq00);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq30         = _mm256_mul_pd(rinv30,rinv30);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(_mm256_mul_pd(c12_00,rinvsix),c6_00),_mm256_mul_pd(rinvsix,rinvsq00));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = fvdw;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq10,rcutoff2))
 +            {
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +            r10              = _mm256_andnot_pd(dummy_mask,r10);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r10,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq20,rcutoff2))
 +            {
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +            r20              = _mm256_andnot_pd(dummy_mask,r20);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r20,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq30,rcutoff2))
 +            {
 +
 +            r30              = _mm256_mul_pd(rsq30,rinv30);
 +            r30              = _mm256_andnot_pd(dummy_mask,r30);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq30             = _mm256_mul_pd(iq3,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r30,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq30,rinv30),_mm256_sub_pd(rinvsq30,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq30,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx30);
 +            ty               = _mm256_mul_pd(fscal,dy30);
 +            tz               = _mm256_mul_pd(fscal,dz30);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 153 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_4atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 24 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4_F,outeriter*24 + inneriter*153);
 +}
index 7ac56e43419ad78fd970132d8024e8b801c4763e,0000000000000000000000000000000000000000..c2f9de398d9c3269177835d74d76d25cefa948ca
mode 100644,000000..100644
--- /dev/null
@@@ -1,2836 -1,0 +1,2836 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/*
 + * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2001-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the Free
 + * Software Foundation; either version 2 of the License, or (at your option) any
 + * later version.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website.
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecEwSh_VdwLJSh_GeomW4W4_VF_avx_256_double
 + * Electrostatics interaction: Ewald
 + * VdW interaction:            LennardJones
 + * Geometry:                   Water4-Water4
 + * Calculate force/pot:        PotentialAndForce
 + */
 +void
 +nb_kernel_ElecEwSh_VdwLJSh_GeomW4W4_VF_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    real *           vdwioffsetptr3;
 +    __m256d          ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
 +    __m256d          jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
 +    int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
 +    __m256d          jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
 +    int              vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D;
 +    __m256d          jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
 +    __m256d          dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
 +    __m256d          dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
 +    __m256d          dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
 +    __m256d          dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
 +    __m256d          dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
 +    __m256d          dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
 +    __m256d          dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
 +    __m256d          dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m128i          ewitab;
 +    __m256d          ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
 +    __m256d          beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
 +    real             *ewtab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    sh_ewald         = _mm256_set1_pd(fr->ic->sh_ewald);
 +    beta             = _mm256_set1_pd(fr->ic->ewaldcoeff);
 +    beta2            = _mm256_mul_pd(beta,beta);
 +    beta3            = _mm256_mul_pd(beta,beta2);
 +
 +    ewtab            = fr->ic->tabq_coul_FDV0;
 +    ewtabscale       = _mm256_set1_pd(fr->ic->tabq_scale);
 +    ewtabhalfspace   = _mm256_set1_pd(0.5/fr->ic->tabq_scale);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    iq3              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+3]));
 +    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +    jq1              = _mm256_set1_pd(charge[inr+1]);
 +    jq2              = _mm256_set1_pd(charge[inr+2]);
 +    jq3              = _mm256_set1_pd(charge[inr+3]);
 +    vdwjidx0A        = 2*vdwtype[inr+0];
 +    c6_00            = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A]);
 +    c12_00           = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A+1]);
 +    qq11             = _mm256_mul_pd(iq1,jq1);
 +    qq12             = _mm256_mul_pd(iq1,jq2);
 +    qq13             = _mm256_mul_pd(iq1,jq3);
 +    qq21             = _mm256_mul_pd(iq2,jq1);
 +    qq22             = _mm256_mul_pd(iq2,jq2);
 +    qq23             = _mm256_mul_pd(iq2,jq3);
 +    qq31             = _mm256_mul_pd(iq3,jq1);
 +    qq32             = _mm256_mul_pd(iq3,jq2);
 +    qq33             = _mm256_mul_pd(iq3,jq3);
 +
 +    /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
 +    rcutoff_scalar   = fr->rcoulomb;
 +    rcutoff          = _mm256_set1_pd(rcutoff_scalar);
 +    rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);
 +
 +    sh_vdw_invrcut6  = _mm256_set1_pd(fr->ic->sh_invrc6);
 +    rvdw             = _mm256_set1_pd(fr->rvdw);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_4rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +        fix3             = _mm256_setzero_pd();
 +        fiy3             = _mm256_setzero_pd();
 +        fiz3             = _mm256_setzero_pd();
 +
 +        /* Reset potential sums */
 +        velecsum         = _mm256_setzero_pd();
 +        vvdwsum          = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_4rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
 +                                                 &jy2,&jz2,&jx3,&jy3,&jz3);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx13             = _mm256_sub_pd(ix1,jx3);
 +            dy13             = _mm256_sub_pd(iy1,jy3);
 +            dz13             = _mm256_sub_pd(iz1,jz3);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +            dx23             = _mm256_sub_pd(ix2,jx3);
 +            dy23             = _mm256_sub_pd(iy2,jy3);
 +            dz23             = _mm256_sub_pd(iz2,jz3);
 +            dx31             = _mm256_sub_pd(ix3,jx1);
 +            dy31             = _mm256_sub_pd(iy3,jy1);
 +            dz31             = _mm256_sub_pd(iz3,jz1);
 +            dx32             = _mm256_sub_pd(ix3,jx2);
 +            dy32             = _mm256_sub_pd(iy3,jy2);
 +            dz32             = _mm256_sub_pd(iz3,jz2);
 +            dx33             = _mm256_sub_pd(ix3,jx3);
 +            dy33             = _mm256_sub_pd(iy3,jy3);
 +            dz33             = _mm256_sub_pd(iz3,jz3);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq13            = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +            rsq23            = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
 +            rsq31            = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
 +            rsq32            = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
 +            rsq33            = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);
 +
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv13           = gmx_mm256_invsqrt_pd(rsq13);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +            rinv23           = gmx_mm256_invsqrt_pd(rsq23);
 +            rinv31           = gmx_mm256_invsqrt_pd(rsq31);
 +            rinv32           = gmx_mm256_invsqrt_pd(rsq32);
 +            rinv33           = gmx_mm256_invsqrt_pd(rsq33);
 +
 +            rinvsq00         = gmx_mm256_inv_pd(rsq00);
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq13         = _mm256_mul_pd(rinv13,rinv13);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +            rinvsq23         = _mm256_mul_pd(rinv23,rinv23);
 +            rinvsq31         = _mm256_mul_pd(rinv31,rinv31);
 +            rinvsq32         = _mm256_mul_pd(rinv32,rinv32);
 +            rinvsq33         = _mm256_mul_pd(rinv33,rinv33);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +            fjx3             = _mm256_setzero_pd();
 +            fjy3             = _mm256_setzero_pd();
 +            fjz3             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd(_mm256_mul_pd( _mm256_sub_pd(vvdw12 , _mm256_mul_pd(c12_00,_mm256_mul_pd(sh_vdw_invrcut6,sh_vdw_invrcut6))), one_twelfth) ,
 +                                          _mm256_mul_pd( _mm256_sub_pd(vvdw6,_mm256_mul_pd(c6_00,sh_vdw_invrcut6)),one_sixth));
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            vvdw             = _mm256_and_pd(vvdw,cutoff_mask);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = fvdw;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq11,rcutoff2))
 +            {
 +
 +            r11              = _mm256_mul_pd(rsq11,rinv11);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r11,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_sub_pd(rinv11,sh_ewald),velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq11,rinv11),_mm256_sub_pd(rinvsq11,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq12,rcutoff2))
 +            {
 +
 +            r12              = _mm256_mul_pd(rsq12,rinv12);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r12,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_sub_pd(rinv12,sh_ewald),velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq12,rinv12),_mm256_sub_pd(rinvsq12,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq13,rcutoff2))
 +            {
 +
 +            r13              = _mm256_mul_pd(rsq13,rinv13);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r13,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq13,_mm256_sub_pd(_mm256_sub_pd(rinv13,sh_ewald),velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq13,rinv13),_mm256_sub_pd(rinvsq13,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq13,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx13);
 +            ty               = _mm256_mul_pd(fscal,dy13);
 +            tz               = _mm256_mul_pd(fscal,dz13);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq21,rcutoff2))
 +            {
 +
 +            r21              = _mm256_mul_pd(rsq21,rinv21);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r21,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_sub_pd(rinv21,sh_ewald),velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq21,rinv21),_mm256_sub_pd(rinvsq21,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq22,rcutoff2))
 +            {
 +
 +            r22              = _mm256_mul_pd(rsq22,rinv22);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r22,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_sub_pd(rinv22,sh_ewald),velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq22,rinv22),_mm256_sub_pd(rinvsq22,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq23,rcutoff2))
 +            {
 +
 +            r23              = _mm256_mul_pd(rsq23,rinv23);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r23,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq23,_mm256_sub_pd(_mm256_sub_pd(rinv23,sh_ewald),velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq23,rinv23),_mm256_sub_pd(rinvsq23,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq23,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx23);
 +            ty               = _mm256_mul_pd(fscal,dy23);
 +            tz               = _mm256_mul_pd(fscal,dz23);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq31,rcutoff2))
 +            {
 +
 +            r31              = _mm256_mul_pd(rsq31,rinv31);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r31,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq31,_mm256_sub_pd(_mm256_sub_pd(rinv31,sh_ewald),velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq31,rinv31),_mm256_sub_pd(rinvsq31,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq31,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx31);
 +            ty               = _mm256_mul_pd(fscal,dy31);
 +            tz               = _mm256_mul_pd(fscal,dz31);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq32,rcutoff2))
 +            {
 +
 +            r32              = _mm256_mul_pd(rsq32,rinv32);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r32,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq32,_mm256_sub_pd(_mm256_sub_pd(rinv32,sh_ewald),velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq32,rinv32),_mm256_sub_pd(rinvsq32,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq32,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx32);
 +            ty               = _mm256_mul_pd(fscal,dy32);
 +            tz               = _mm256_mul_pd(fscal,dz32);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq33,rcutoff2))
 +            {
 +
 +            r33              = _mm256_mul_pd(rsq33,rinv33);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r33,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq33,_mm256_sub_pd(_mm256_sub_pd(rinv33,sh_ewald),velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq33,rinv33),_mm256_sub_pd(rinvsq33,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq33,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx33);
 +            ty               = _mm256_mul_pd(fscal,dy33);
 +            tz               = _mm256_mul_pd(fscal,dz33);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            }
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_4rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
 +                                                      fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
 +
 +            /* Inner loop uses 458 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_4rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
 +                                                 &jy2,&jz2,&jx3,&jy3,&jz3);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx13             = _mm256_sub_pd(ix1,jx3);
 +            dy13             = _mm256_sub_pd(iy1,jy3);
 +            dz13             = _mm256_sub_pd(iz1,jz3);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +            dx23             = _mm256_sub_pd(ix2,jx3);
 +            dy23             = _mm256_sub_pd(iy2,jy3);
 +            dz23             = _mm256_sub_pd(iz2,jz3);
 +            dx31             = _mm256_sub_pd(ix3,jx1);
 +            dy31             = _mm256_sub_pd(iy3,jy1);
 +            dz31             = _mm256_sub_pd(iz3,jz1);
 +            dx32             = _mm256_sub_pd(ix3,jx2);
 +            dy32             = _mm256_sub_pd(iy3,jy2);
 +            dz32             = _mm256_sub_pd(iz3,jz2);
 +            dx33             = _mm256_sub_pd(ix3,jx3);
 +            dy33             = _mm256_sub_pd(iy3,jy3);
 +            dz33             = _mm256_sub_pd(iz3,jz3);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq13            = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +            rsq23            = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
 +            rsq31            = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
 +            rsq32            = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
 +            rsq33            = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);
 +
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv13           = gmx_mm256_invsqrt_pd(rsq13);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +            rinv23           = gmx_mm256_invsqrt_pd(rsq23);
 +            rinv31           = gmx_mm256_invsqrt_pd(rsq31);
 +            rinv32           = gmx_mm256_invsqrt_pd(rsq32);
 +            rinv33           = gmx_mm256_invsqrt_pd(rsq33);
 +
 +            rinvsq00         = gmx_mm256_inv_pd(rsq00);
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq13         = _mm256_mul_pd(rinv13,rinv13);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +            rinvsq23         = _mm256_mul_pd(rinv23,rinv23);
 +            rinvsq31         = _mm256_mul_pd(rinv31,rinv31);
 +            rinvsq32         = _mm256_mul_pd(rinv32,rinv32);
 +            rinvsq33         = _mm256_mul_pd(rinv33,rinv33);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +            fjx3             = _mm256_setzero_pd();
 +            fjy3             = _mm256_setzero_pd();
 +            fjz3             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd(_mm256_mul_pd( _mm256_sub_pd(vvdw12 , _mm256_mul_pd(c12_00,_mm256_mul_pd(sh_vdw_invrcut6,sh_vdw_invrcut6))), one_twelfth) ,
 +                                          _mm256_mul_pd( _mm256_sub_pd(vvdw6,_mm256_mul_pd(c6_00,sh_vdw_invrcut6)),one_sixth));
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            vvdw             = _mm256_and_pd(vvdw,cutoff_mask);
 +            vvdw             = _mm256_andnot_pd(dummy_mask,vvdw);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = fvdw;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq11,rcutoff2))
 +            {
 +
 +            r11              = _mm256_mul_pd(rsq11,rinv11);
 +            r11              = _mm256_andnot_pd(dummy_mask,r11);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r11,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_sub_pd(rinv11,sh_ewald),velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq11,rinv11),_mm256_sub_pd(rinvsq11,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq12,rcutoff2))
 +            {
 +
 +            r12              = _mm256_mul_pd(rsq12,rinv12);
 +            r12              = _mm256_andnot_pd(dummy_mask,r12);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r12,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_sub_pd(rinv12,sh_ewald),velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq12,rinv12),_mm256_sub_pd(rinvsq12,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq13,rcutoff2))
 +            {
 +
 +            r13              = _mm256_mul_pd(rsq13,rinv13);
 +            r13              = _mm256_andnot_pd(dummy_mask,r13);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r13,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq13,_mm256_sub_pd(_mm256_sub_pd(rinv13,sh_ewald),velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq13,rinv13),_mm256_sub_pd(rinvsq13,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq13,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx13);
 +            ty               = _mm256_mul_pd(fscal,dy13);
 +            tz               = _mm256_mul_pd(fscal,dz13);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq21,rcutoff2))
 +            {
 +
 +            r21              = _mm256_mul_pd(rsq21,rinv21);
 +            r21              = _mm256_andnot_pd(dummy_mask,r21);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r21,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_sub_pd(rinv21,sh_ewald),velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq21,rinv21),_mm256_sub_pd(rinvsq21,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq22,rcutoff2))
 +            {
 +
 +            r22              = _mm256_mul_pd(rsq22,rinv22);
 +            r22              = _mm256_andnot_pd(dummy_mask,r22);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r22,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_sub_pd(rinv22,sh_ewald),velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq22,rinv22),_mm256_sub_pd(rinvsq22,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq23,rcutoff2))
 +            {
 +
 +            r23              = _mm256_mul_pd(rsq23,rinv23);
 +            r23              = _mm256_andnot_pd(dummy_mask,r23);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r23,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq23,_mm256_sub_pd(_mm256_sub_pd(rinv23,sh_ewald),velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq23,rinv23),_mm256_sub_pd(rinvsq23,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq23,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx23);
 +            ty               = _mm256_mul_pd(fscal,dy23);
 +            tz               = _mm256_mul_pd(fscal,dz23);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq31,rcutoff2))
 +            {
 +
 +            r31              = _mm256_mul_pd(rsq31,rinv31);
 +            r31              = _mm256_andnot_pd(dummy_mask,r31);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r31,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq31,_mm256_sub_pd(_mm256_sub_pd(rinv31,sh_ewald),velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq31,rinv31),_mm256_sub_pd(rinvsq31,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq31,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx31);
 +            ty               = _mm256_mul_pd(fscal,dy31);
 +            tz               = _mm256_mul_pd(fscal,dz31);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq32,rcutoff2))
 +            {
 +
 +            r32              = _mm256_mul_pd(rsq32,rinv32);
 +            r32              = _mm256_andnot_pd(dummy_mask,r32);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r32,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq32,_mm256_sub_pd(_mm256_sub_pd(rinv32,sh_ewald),velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq32,rinv32),_mm256_sub_pd(rinvsq32,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq32,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx32);
 +            ty               = _mm256_mul_pd(fscal,dy32);
 +            tz               = _mm256_mul_pd(fscal,dz32);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq33,rcutoff2))
 +            {
 +
 +            r33              = _mm256_mul_pd(rsq33,rinv33);
 +            r33              = _mm256_andnot_pd(dummy_mask,r33);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r33,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq33,_mm256_sub_pd(_mm256_sub_pd(rinv33,sh_ewald),velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq33,rinv33),_mm256_sub_pd(rinvsq33,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq33,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx33);
 +            ty               = _mm256_mul_pd(fscal,dy33);
 +            tz               = _mm256_mul_pd(fscal,dz33);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            }
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_4rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
 +                                                      fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
 +
 +            /* Inner loop uses 467 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_4atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
 +        gmx_mm256_update_1pot_pd(vvdwsum,kernel_data->energygrp_vdw+ggid);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 26 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4W4_VF,outeriter*26 + inneriter*467);
 +}
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecEwSh_VdwLJSh_GeomW4W4_F_avx_256_double
 + * Electrostatics interaction: Ewald
 + * VdW interaction:            LennardJones
 + * Geometry:                   Water4-Water4
 + * Calculate force/pot:        Force
 + */
 +void
 +nb_kernel_ElecEwSh_VdwLJSh_GeomW4W4_F_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    real *           vdwioffsetptr3;
 +    __m256d          ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
 +    __m256d          jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
 +    int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
 +    __m256d          jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
 +    int              vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D;
 +    __m256d          jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
 +    __m256d          dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
 +    __m256d          dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
 +    __m256d          dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
 +    __m256d          dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
 +    __m256d          dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
 +    __m256d          dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
 +    __m256d          dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
 +    __m256d          dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m128i          ewitab;
 +    __m256d          ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
 +    __m256d          beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
 +    real             *ewtab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    sh_ewald         = _mm256_set1_pd(fr->ic->sh_ewald);
 +    beta             = _mm256_set1_pd(fr->ic->ewaldcoeff);
 +    beta2            = _mm256_mul_pd(beta,beta);
 +    beta3            = _mm256_mul_pd(beta,beta2);
 +
 +    ewtab            = fr->ic->tabq_coul_F;
 +    ewtabscale       = _mm256_set1_pd(fr->ic->tabq_scale);
 +    ewtabhalfspace   = _mm256_set1_pd(0.5/fr->ic->tabq_scale);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    iq3              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+3]));
 +    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +    jq1              = _mm256_set1_pd(charge[inr+1]);
 +    jq2              = _mm256_set1_pd(charge[inr+2]);
 +    jq3              = _mm256_set1_pd(charge[inr+3]);
 +    vdwjidx0A        = 2*vdwtype[inr+0];
 +    c6_00            = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A]);
 +    c12_00           = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A+1]);
 +    qq11             = _mm256_mul_pd(iq1,jq1);
 +    qq12             = _mm256_mul_pd(iq1,jq2);
 +    qq13             = _mm256_mul_pd(iq1,jq3);
 +    qq21             = _mm256_mul_pd(iq2,jq1);
 +    qq22             = _mm256_mul_pd(iq2,jq2);
 +    qq23             = _mm256_mul_pd(iq2,jq3);
 +    qq31             = _mm256_mul_pd(iq3,jq1);
 +    qq32             = _mm256_mul_pd(iq3,jq2);
 +    qq33             = _mm256_mul_pd(iq3,jq3);
 +
 +    /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
 +    rcutoff_scalar   = fr->rcoulomb;
 +    rcutoff          = _mm256_set1_pd(rcutoff_scalar);
 +    rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);
 +
 +    sh_vdw_invrcut6  = _mm256_set1_pd(fr->ic->sh_invrc6);
 +    rvdw             = _mm256_set1_pd(fr->rvdw);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_4rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +        fix3             = _mm256_setzero_pd();
 +        fiy3             = _mm256_setzero_pd();
 +        fiz3             = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_4rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
 +                                                 &jy2,&jz2,&jx3,&jy3,&jz3);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx13             = _mm256_sub_pd(ix1,jx3);
 +            dy13             = _mm256_sub_pd(iy1,jy3);
 +            dz13             = _mm256_sub_pd(iz1,jz3);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +            dx23             = _mm256_sub_pd(ix2,jx3);
 +            dy23             = _mm256_sub_pd(iy2,jy3);
 +            dz23             = _mm256_sub_pd(iz2,jz3);
 +            dx31             = _mm256_sub_pd(ix3,jx1);
 +            dy31             = _mm256_sub_pd(iy3,jy1);
 +            dz31             = _mm256_sub_pd(iz3,jz1);
 +            dx32             = _mm256_sub_pd(ix3,jx2);
 +            dy32             = _mm256_sub_pd(iy3,jy2);
 +            dz32             = _mm256_sub_pd(iz3,jz2);
 +            dx33             = _mm256_sub_pd(ix3,jx3);
 +            dy33             = _mm256_sub_pd(iy3,jy3);
 +            dz33             = _mm256_sub_pd(iz3,jz3);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq13            = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +            rsq23            = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
 +            rsq31            = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
 +            rsq32            = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
 +            rsq33            = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);
 +
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv13           = gmx_mm256_invsqrt_pd(rsq13);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +            rinv23           = gmx_mm256_invsqrt_pd(rsq23);
 +            rinv31           = gmx_mm256_invsqrt_pd(rsq31);
 +            rinv32           = gmx_mm256_invsqrt_pd(rsq32);
 +            rinv33           = gmx_mm256_invsqrt_pd(rsq33);
 +
 +            rinvsq00         = gmx_mm256_inv_pd(rsq00);
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq13         = _mm256_mul_pd(rinv13,rinv13);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +            rinvsq23         = _mm256_mul_pd(rinv23,rinv23);
 +            rinvsq31         = _mm256_mul_pd(rinv31,rinv31);
 +            rinvsq32         = _mm256_mul_pd(rinv32,rinv32);
 +            rinvsq33         = _mm256_mul_pd(rinv33,rinv33);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +            fjx3             = _mm256_setzero_pd();
 +            fjy3             = _mm256_setzero_pd();
 +            fjz3             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(_mm256_mul_pd(c12_00,rinvsix),c6_00),_mm256_mul_pd(rinvsix,rinvsq00));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = fvdw;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq11,rcutoff2))
 +            {
 +
 +            r11              = _mm256_mul_pd(rsq11,rinv11);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r11,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq11,rinv11),_mm256_sub_pd(rinvsq11,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq12,rcutoff2))
 +            {
 +
 +            r12              = _mm256_mul_pd(rsq12,rinv12);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r12,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq12,rinv12),_mm256_sub_pd(rinvsq12,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq13,rcutoff2))
 +            {
 +
 +            r13              = _mm256_mul_pd(rsq13,rinv13);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r13,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq13,rinv13),_mm256_sub_pd(rinvsq13,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq13,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx13);
 +            ty               = _mm256_mul_pd(fscal,dy13);
 +            tz               = _mm256_mul_pd(fscal,dz13);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq21,rcutoff2))
 +            {
 +
 +            r21              = _mm256_mul_pd(rsq21,rinv21);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r21,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq21,rinv21),_mm256_sub_pd(rinvsq21,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq22,rcutoff2))
 +            {
 +
 +            r22              = _mm256_mul_pd(rsq22,rinv22);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r22,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq22,rinv22),_mm256_sub_pd(rinvsq22,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq23,rcutoff2))
 +            {
 +
 +            r23              = _mm256_mul_pd(rsq23,rinv23);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r23,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq23,rinv23),_mm256_sub_pd(rinvsq23,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq23,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx23);
 +            ty               = _mm256_mul_pd(fscal,dy23);
 +            tz               = _mm256_mul_pd(fscal,dz23);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq31,rcutoff2))
 +            {
 +
 +            r31              = _mm256_mul_pd(rsq31,rinv31);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r31,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq31,rinv31),_mm256_sub_pd(rinvsq31,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq31,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx31);
 +            ty               = _mm256_mul_pd(fscal,dy31);
 +            tz               = _mm256_mul_pd(fscal,dz31);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq32,rcutoff2))
 +            {
 +
 +            r32              = _mm256_mul_pd(rsq32,rinv32);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r32,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq32,rinv32),_mm256_sub_pd(rinvsq32,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq32,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx32);
 +            ty               = _mm256_mul_pd(fscal,dy32);
 +            tz               = _mm256_mul_pd(fscal,dz32);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq33,rcutoff2))
 +            {
 +
 +            r33              = _mm256_mul_pd(rsq33,rinv33);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r33,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq33,rinv33),_mm256_sub_pd(rinvsq33,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq33,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx33);
 +            ty               = _mm256_mul_pd(fscal,dy33);
 +            tz               = _mm256_mul_pd(fscal,dz33);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            }
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_4rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
 +                                                      fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
 +
 +            /* Inner loop uses 384 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_4rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
 +                                                 &jy2,&jz2,&jx3,&jy3,&jz3);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx13             = _mm256_sub_pd(ix1,jx3);
 +            dy13             = _mm256_sub_pd(iy1,jy3);
 +            dz13             = _mm256_sub_pd(iz1,jz3);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +            dx23             = _mm256_sub_pd(ix2,jx3);
 +            dy23             = _mm256_sub_pd(iy2,jy3);
 +            dz23             = _mm256_sub_pd(iz2,jz3);
 +            dx31             = _mm256_sub_pd(ix3,jx1);
 +            dy31             = _mm256_sub_pd(iy3,jy1);
 +            dz31             = _mm256_sub_pd(iz3,jz1);
 +            dx32             = _mm256_sub_pd(ix3,jx2);
 +            dy32             = _mm256_sub_pd(iy3,jy2);
 +            dz32             = _mm256_sub_pd(iz3,jz2);
 +            dx33             = _mm256_sub_pd(ix3,jx3);
 +            dy33             = _mm256_sub_pd(iy3,jy3);
 +            dz33             = _mm256_sub_pd(iz3,jz3);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq13            = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +            rsq23            = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
 +            rsq31            = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
 +            rsq32            = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
 +            rsq33            = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);
 +
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv13           = gmx_mm256_invsqrt_pd(rsq13);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +            rinv23           = gmx_mm256_invsqrt_pd(rsq23);
 +            rinv31           = gmx_mm256_invsqrt_pd(rsq31);
 +            rinv32           = gmx_mm256_invsqrt_pd(rsq32);
 +            rinv33           = gmx_mm256_invsqrt_pd(rsq33);
 +
 +            rinvsq00         = gmx_mm256_inv_pd(rsq00);
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq13         = _mm256_mul_pd(rinv13,rinv13);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +            rinvsq23         = _mm256_mul_pd(rinv23,rinv23);
 +            rinvsq31         = _mm256_mul_pd(rinv31,rinv31);
 +            rinvsq32         = _mm256_mul_pd(rinv32,rinv32);
 +            rinvsq33         = _mm256_mul_pd(rinv33,rinv33);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +            fjx3             = _mm256_setzero_pd();
 +            fjy3             = _mm256_setzero_pd();
 +            fjz3             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(_mm256_mul_pd(c12_00,rinvsix),c6_00),_mm256_mul_pd(rinvsix,rinvsq00));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = fvdw;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq11,rcutoff2))
 +            {
 +
 +            r11              = _mm256_mul_pd(rsq11,rinv11);
 +            r11              = _mm256_andnot_pd(dummy_mask,r11);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r11,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq11,rinv11),_mm256_sub_pd(rinvsq11,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq12,rcutoff2))
 +            {
 +
 +            r12              = _mm256_mul_pd(rsq12,rinv12);
 +            r12              = _mm256_andnot_pd(dummy_mask,r12);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r12,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq12,rinv12),_mm256_sub_pd(rinvsq12,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq13,rcutoff2))
 +            {
 +
 +            r13              = _mm256_mul_pd(rsq13,rinv13);
 +            r13              = _mm256_andnot_pd(dummy_mask,r13);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r13,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq13,rinv13),_mm256_sub_pd(rinvsq13,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq13,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx13);
 +            ty               = _mm256_mul_pd(fscal,dy13);
 +            tz               = _mm256_mul_pd(fscal,dz13);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq21,rcutoff2))
 +            {
 +
 +            r21              = _mm256_mul_pd(rsq21,rinv21);
 +            r21              = _mm256_andnot_pd(dummy_mask,r21);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r21,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq21,rinv21),_mm256_sub_pd(rinvsq21,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq22,rcutoff2))
 +            {
 +
 +            r22              = _mm256_mul_pd(rsq22,rinv22);
 +            r22              = _mm256_andnot_pd(dummy_mask,r22);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r22,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq22,rinv22),_mm256_sub_pd(rinvsq22,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq23,rcutoff2))
 +            {
 +
 +            r23              = _mm256_mul_pd(rsq23,rinv23);
 +            r23              = _mm256_andnot_pd(dummy_mask,r23);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r23,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq23,rinv23),_mm256_sub_pd(rinvsq23,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq23,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx23);
 +            ty               = _mm256_mul_pd(fscal,dy23);
 +            tz               = _mm256_mul_pd(fscal,dz23);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq31,rcutoff2))
 +            {
 +
 +            r31              = _mm256_mul_pd(rsq31,rinv31);
 +            r31              = _mm256_andnot_pd(dummy_mask,r31);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r31,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq31,rinv31),_mm256_sub_pd(rinvsq31,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq31,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx31);
 +            ty               = _mm256_mul_pd(fscal,dy31);
 +            tz               = _mm256_mul_pd(fscal,dz31);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq32,rcutoff2))
 +            {
 +
 +            r32              = _mm256_mul_pd(rsq32,rinv32);
 +            r32              = _mm256_andnot_pd(dummy_mask,r32);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r32,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq32,rinv32),_mm256_sub_pd(rinvsq32,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq32,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx32);
 +            ty               = _mm256_mul_pd(fscal,dy32);
 +            tz               = _mm256_mul_pd(fscal,dz32);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq33,rcutoff2))
 +            {
 +
 +            r33              = _mm256_mul_pd(rsq33,rinv33);
 +            r33              = _mm256_andnot_pd(dummy_mask,r33);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r33,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq33,rinv33),_mm256_sub_pd(rinvsq33,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq33,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx33);
 +            ty               = _mm256_mul_pd(fscal,dy33);
 +            tz               = _mm256_mul_pd(fscal,dz33);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            }
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_4rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
 +                                                      fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
 +
 +            /* Inner loop uses 393 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_4atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 24 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4W4_F,outeriter*24 + inneriter*393);
 +}
index 265055c7d3518d121da2b9c0c3269e0fb5e14166,0000000000000000000000000000000000000000..b509fdf1d5c40320432a531917db33efd0f3e9ad
mode 100644,000000..100644
--- /dev/null
@@@ -1,707 -1,0 +1,707 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/*
 + * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2001-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the Free
 + * Software Foundation; either version 2 of the License, or (at your option) any
 + * later version.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website.
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecEwSh_VdwNone_GeomP1P1_VF_avx_256_double
 + * Electrostatics interaction: Ewald
 + * VdW interaction:            None
 + * Geometry:                   Particle-Particle
 + * Calculate force/pot:        PotentialAndForce
 + */
 +void
 +nb_kernel_ElecEwSh_VdwNone_GeomP1P1_VF_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    __m128i          ewitab;
 +    __m256d          ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
 +    __m256d          beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
 +    real             *ewtab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +
 +    sh_ewald         = _mm256_set1_pd(fr->ic->sh_ewald);
 +    beta             = _mm256_set1_pd(fr->ic->ewaldcoeff);
 +    beta2            = _mm256_mul_pd(beta,beta);
 +    beta3            = _mm256_mul_pd(beta,beta2);
 +
 +    ewtab            = fr->ic->tabq_coul_FDV0;
 +    ewtabscale       = _mm256_set1_pd(fr->ic->tabq_scale);
 +    ewtabhalfspace   = _mm256_set1_pd(0.5/fr->ic->tabq_scale);
 +
 +    /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
 +    rcutoff_scalar   = fr->rcoulomb;
 +    rcutoff          = _mm256_set1_pd(rcutoff_scalar);
 +    rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_1rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +
 +        /* Load parameters for i particles */
 +        iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +
 +        /* Reset potential sums */
 +        velecsum         = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r00,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_sub_pd(rinv00,sh_ewald),velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
 +
 +            }
 +
 +            /* Inner loop uses 46 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r00,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_sub_pd(rinv00,sh_ewald),velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
 +
 +            }
 +
 +            /* Inner loop uses 47 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_1atom_swizzle_pd(fix0,fiy0,fiz0,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 8 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VF,outeriter*8 + inneriter*47);
 +}
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecEwSh_VdwNone_GeomP1P1_F_avx_256_double
 + * Electrostatics interaction: Ewald
 + * VdW interaction:            None
 + * Geometry:                   Particle-Particle
 + * Calculate force/pot:        Force
 + */
 +/* NOTE(review): this span is shown inside a combined git diff, so every line
 + * carries a diff status prefix (" +" / "++").  The kernel body is
 + * machine-generated by the GROMACS avx_256_double kernel generator; any
 + * behavioural change should be made in the generator, not hand-edited here.
 + * Force-only variant: computes shifted-Ewald electrostatic forces for
 + * particle-particle pairs, 4 j-atoms at a time in one __m256d register. */
 +void
 +nb_kernel_ElecEwSh_VdwNone_GeomP1P1_F_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    /* scratch absorbs force writes for masked-out (padding) j entries so the
 +     * SIMD decrement can always store to a valid address */
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    __m128i          ewitab;
 +    __m256d          ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
 +    __m256d          beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
 +    real             *ewtab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +
 +    sh_ewald         = _mm256_set1_pd(fr->ic->sh_ewald);
 +    beta             = _mm256_set1_pd(fr->ic->ewaldcoeff);
 +    beta2            = _mm256_mul_pd(beta,beta);
 +    beta3            = _mm256_mul_pd(beta,beta2);
 +
 +    /* Force-only table (tabq_coul_F); the VF variant above uses tabq_coul_FDV0 */
 +    ewtab            = fr->ic->tabq_coul_F;
 +    ewtabscale       = _mm256_set1_pd(fr->ic->tabq_scale);
 +    ewtabhalfspace   = _mm256_set1_pd(0.5/fr->ic->tabq_scale);
 +
 +    /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
 +    rcutoff_scalar   = fr->rcoulomb;
 +    rcutoff          = _mm256_set1_pd(rcutoff_scalar);
 +    rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_1rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +
 +        /* Load parameters for i particles */
 +        iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +
 +        /* Start inner kernel loop */
 +        /* Runs only while all four jjnr entries are real atoms (index >= 0);
 +         * the final, partially filled quartet is handled by the masked
 +         * cleanup loop that follows. */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r00,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            /* Linear interpolation between adjacent table values ewtabF and ewtabFn */
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
 +
 +            }
 +
 +            /* Inner loop uses 39 flops */
 +        }
 +
 +        /* Cleanup loop: handle the last, partially filled quartet with a
 +         * dummy mask over the padding entries */
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            /* Clamp dummy indices to 0 so loads stay within valid memory */
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            /* Zero r for dummy entries so the derived table index stays valid */
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r00,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            /* Linear interpolation between adjacent table values ewtabF and ewtabFn */
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            /* Redirect force updates for dummy entries into the scratch buffer */
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
 +
 +            }
 +
 +            /* Inner loop uses 40 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_1atom_swizzle_pd(fix0,fiy0,fiz0,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 7 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_F,outeriter*7 + inneriter*40);
 +}
index e16610b3cabd392db83225be42776853e34808fe,0000000000000000000000000000000000000000..a10d4d46344701c5df4c42678426c59e8dd71ff8
mode 100644,000000..100644
--- /dev/null
@@@ -1,1241 -1,0 +1,1241 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/*
 + * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2001-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the Free
 + * Software Foundation; either version 2 of the License, or (at your option) any
 + * later version.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website.
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecEwSh_VdwNone_GeomW3P1_VF_avx_256_double
 + * Electrostatics interaction: Ewald
 + * VdW interaction:            None
 + * Geometry:                   Water3-Particle
 + * Calculate force/pot:        PotentialAndForce
 + */
 +void
 +nb_kernel_ElecEwSh_VdwNone_GeomW3P1_VF_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    __m128i          ewitab;
 +    __m256d          ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
 +    __m256d          beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
 +    real             *ewtab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +
 +    sh_ewald         = _mm256_set1_pd(fr->ic->sh_ewald);
 +    beta             = _mm256_set1_pd(fr->ic->ewaldcoeff);
 +    beta2            = _mm256_mul_pd(beta,beta);
 +    beta3            = _mm256_mul_pd(beta,beta2);
 +
 +    ewtab            = fr->ic->tabq_coul_FDV0;
 +    ewtabscale       = _mm256_set1_pd(fr->ic->tabq_scale);
 +    ewtabhalfspace   = _mm256_set1_pd(0.5/fr->ic->tabq_scale);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +
 +    /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
 +    rcutoff_scalar   = fr->rcoulomb;
 +    rcutoff          = _mm256_set1_pd(rcutoff_scalar);
 +    rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +
 +        /* Reset potential sums */
 +        velecsum         = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r00,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_sub_pd(rinv00,sh_ewald),velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq10,rcutoff2))
 +            {
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r10,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_sub_pd(rinv10,sh_ewald),velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq20,rcutoff2))
 +            {
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r20,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_sub_pd(rinv20,sh_ewald),velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 141 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r00,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_sub_pd(rinv00,sh_ewald),velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq10,rcutoff2))
 +            {
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +            r10              = _mm256_andnot_pd(dummy_mask,r10);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r10,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_sub_pd(rinv10,sh_ewald),velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq20,rcutoff2))
 +            {
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +            r20              = _mm256_andnot_pd(dummy_mask,r20);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r20,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_sub_pd(rinv20,sh_ewald),velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 144 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 19 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W3_VF,outeriter*19 + inneriter*144);
 +}
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecEwSh_VdwNone_GeomW3P1_F_avx_256_double
 + * Electrostatics interaction: Ewald
 + * VdW interaction:            None
 + * Geometry:                   Water3-Particle
 + * Calculate force/pot:        Force
 + */
 +void
 +nb_kernel_ElecEwSh_VdwNone_GeomW3P1_F_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    __m128i          ewitab;
 +    __m256d          ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
 +    __m256d          beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
 +    real             *ewtab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +
 +    sh_ewald         = _mm256_set1_pd(fr->ic->sh_ewald);
 +    beta             = _mm256_set1_pd(fr->ic->ewaldcoeff);
 +    beta2            = _mm256_mul_pd(beta,beta);
 +    beta3            = _mm256_mul_pd(beta,beta2);
 +
 +    ewtab            = fr->ic->tabq_coul_F;
 +    ewtabscale       = _mm256_set1_pd(fr->ic->tabq_scale);
 +    ewtabhalfspace   = _mm256_set1_pd(0.5/fr->ic->tabq_scale);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +
 +    /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
 +    rcutoff_scalar   = fr->rcoulomb;
 +    rcutoff          = _mm256_set1_pd(rcutoff_scalar);
 +    rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r00,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq10,rcutoff2))
 +            {
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r10,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq20,rcutoff2))
 +            {
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r20,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 120 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r00,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq10,rcutoff2))
 +            {
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +            r10              = _mm256_andnot_pd(dummy_mask,r10);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r10,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq20,rcutoff2))
 +            {
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +            r20              = _mm256_andnot_pd(dummy_mask,r20);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r20,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 123 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 18 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W3_F,outeriter*18 + inneriter*123);
 +}
index feae0c1c6d2d010e66970dbc77802d98dc0f28ff,0000000000000000000000000000000000000000..8a7f278ffc7b698e56bcf80d39e4026886af5973
mode 100644,000000..100644
--- /dev/null
@@@ -1,2589 -1,0 +1,2589 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/*
 + * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2001-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the Free
 + * Software Foundation; either version 2 of the License, or (at your option) any
 + * later version.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website.
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecEwSh_VdwNone_GeomW3W3_VF_avx_256_double
 + * Electrostatics interaction: Ewald
 + * VdW interaction:            None
 + * Geometry:                   Water3-Water3
 + * Calculate force/pot:        PotentialAndForce
 + */
 +void
 +nb_kernel_ElecEwSh_VdwNone_GeomW3W3_VF_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
 +    __m256d          jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
 +    int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
 +    __m256d          jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
 +    __m256d          dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
 +    __m256d          dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
 +    __m256d          dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    __m128i          ewitab;
 +    __m256d          ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
 +    __m256d          beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
 +    real             *ewtab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +
 +    sh_ewald         = _mm256_set1_pd(fr->ic->sh_ewald);
 +    beta             = _mm256_set1_pd(fr->ic->ewaldcoeff);
 +    beta2            = _mm256_mul_pd(beta,beta);
 +    beta3            = _mm256_mul_pd(beta,beta2);
 +
 +    ewtab            = fr->ic->tabq_coul_FDV0;
 +    ewtabscale       = _mm256_set1_pd(fr->ic->tabq_scale);
 +    ewtabhalfspace   = _mm256_set1_pd(0.5/fr->ic->tabq_scale);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +
 +    jq0              = _mm256_set1_pd(charge[inr+0]);
 +    jq1              = _mm256_set1_pd(charge[inr+1]);
 +    jq2              = _mm256_set1_pd(charge[inr+2]);
 +    qq00             = _mm256_mul_pd(iq0,jq0);
 +    qq01             = _mm256_mul_pd(iq0,jq1);
 +    qq02             = _mm256_mul_pd(iq0,jq2);
 +    qq10             = _mm256_mul_pd(iq1,jq0);
 +    qq11             = _mm256_mul_pd(iq1,jq1);
 +    qq12             = _mm256_mul_pd(iq1,jq2);
 +    qq20             = _mm256_mul_pd(iq2,jq0);
 +    qq21             = _mm256_mul_pd(iq2,jq1);
 +    qq22             = _mm256_mul_pd(iq2,jq2);
 +
 +    /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
 +    rcutoff_scalar   = fr->rcoulomb;
 +    rcutoff          = _mm256_set1_pd(rcutoff_scalar);
 +    rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +
 +        /* Reset potential sums */
 +        velecsum         = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                              &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx01             = _mm256_sub_pd(ix0,jx1);
 +            dy01             = _mm256_sub_pd(iy0,jy1);
 +            dz01             = _mm256_sub_pd(iz0,jz1);
 +            dx02             = _mm256_sub_pd(ix0,jx2);
 +            dy02             = _mm256_sub_pd(iy0,jy2);
 +            dz02             = _mm256_sub_pd(iz0,jz2);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
 +            rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv01           = gmx_mm256_invsqrt_pd(rsq01);
 +            rinv02           = gmx_mm256_invsqrt_pd(rsq02);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq01         = _mm256_mul_pd(rinv01,rinv01);
 +            rinvsq02         = _mm256_mul_pd(rinv02,rinv02);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r00,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_sub_pd(rinv00,sh_ewald),velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq01,rcutoff2))
 +            {
 +
 +            r01              = _mm256_mul_pd(rsq01,rinv01);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r01,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq01,_mm256_sub_pd(_mm256_sub_pd(rinv01,sh_ewald),velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq01,rinv01),_mm256_sub_pd(rinvsq01,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq01,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx01);
 +            ty               = _mm256_mul_pd(fscal,dy01);
 +            tz               = _mm256_mul_pd(fscal,dz01);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq02,rcutoff2))
 +            {
 +
 +            r02              = _mm256_mul_pd(rsq02,rinv02);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r02,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq02,_mm256_sub_pd(_mm256_sub_pd(rinv02,sh_ewald),velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq02,rinv02),_mm256_sub_pd(rinvsq02,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq02,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx02);
 +            ty               = _mm256_mul_pd(fscal,dy02);
 +            tz               = _mm256_mul_pd(fscal,dz02);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq10,rcutoff2))
 +            {
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r10,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_sub_pd(rinv10,sh_ewald),velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq11,rcutoff2))
 +            {
 +
 +            r11              = _mm256_mul_pd(rsq11,rinv11);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r11,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_sub_pd(rinv11,sh_ewald),velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq11,rinv11),_mm256_sub_pd(rinvsq11,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq12,rcutoff2))
 +            {
 +
 +            r12              = _mm256_mul_pd(rsq12,rinv12);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r12,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_sub_pd(rinv12,sh_ewald),velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq12,rinv12),_mm256_sub_pd(rinvsq12,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq20,rcutoff2))
 +            {
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r20,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_sub_pd(rinv20,sh_ewald),velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq21,rcutoff2))
 +            {
 +
 +            r21              = _mm256_mul_pd(rsq21,rinv21);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r21,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_sub_pd(rinv21,sh_ewald),velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq21,rinv21),_mm256_sub_pd(rinvsq21,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq22,rcutoff2))
 +            {
 +
 +            r22              = _mm256_mul_pd(rsq22,rinv22);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r22,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_sub_pd(rinv22,sh_ewald),velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq22,rinv22),_mm256_sub_pd(rinvsq22,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
 +
 +            /* Inner loop uses 414 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                              &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx01             = _mm256_sub_pd(ix0,jx1);
 +            dy01             = _mm256_sub_pd(iy0,jy1);
 +            dz01             = _mm256_sub_pd(iz0,jz1);
 +            dx02             = _mm256_sub_pd(ix0,jx2);
 +            dy02             = _mm256_sub_pd(iy0,jy2);
 +            dz02             = _mm256_sub_pd(iz0,jz2);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
 +            rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv01           = gmx_mm256_invsqrt_pd(rsq01);
 +            rinv02           = gmx_mm256_invsqrt_pd(rsq02);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq01         = _mm256_mul_pd(rinv01,rinv01);
 +            rinvsq02         = _mm256_mul_pd(rinv02,rinv02);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r00,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_sub_pd(rinv00,sh_ewald),velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq01,rcutoff2))
 +            {
 +
 +            r01              = _mm256_mul_pd(rsq01,rinv01);
 +            r01              = _mm256_andnot_pd(dummy_mask,r01);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r01,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq01,_mm256_sub_pd(_mm256_sub_pd(rinv01,sh_ewald),velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq01,rinv01),_mm256_sub_pd(rinvsq01,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq01,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx01);
 +            ty               = _mm256_mul_pd(fscal,dy01);
 +            tz               = _mm256_mul_pd(fscal,dz01);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq02,rcutoff2))
 +            {
 +
 +            r02              = _mm256_mul_pd(rsq02,rinv02);
 +            r02              = _mm256_andnot_pd(dummy_mask,r02);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r02,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq02,_mm256_sub_pd(_mm256_sub_pd(rinv02,sh_ewald),velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq02,rinv02),_mm256_sub_pd(rinvsq02,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq02,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx02);
 +            ty               = _mm256_mul_pd(fscal,dy02);
 +            tz               = _mm256_mul_pd(fscal,dz02);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq10,rcutoff2))
 +            {
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +            r10              = _mm256_andnot_pd(dummy_mask,r10);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r10,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_sub_pd(rinv10,sh_ewald),velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq11,rcutoff2))
 +            {
 +
 +            r11              = _mm256_mul_pd(rsq11,rinv11);
 +            r11              = _mm256_andnot_pd(dummy_mask,r11);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r11,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_sub_pd(rinv11,sh_ewald),velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq11,rinv11),_mm256_sub_pd(rinvsq11,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq12,rcutoff2))
 +            {
 +
 +            r12              = _mm256_mul_pd(rsq12,rinv12);
 +            r12              = _mm256_andnot_pd(dummy_mask,r12);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r12,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_sub_pd(rinv12,sh_ewald),velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq12,rinv12),_mm256_sub_pd(rinvsq12,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq20,rcutoff2))
 +            {
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +            r20              = _mm256_andnot_pd(dummy_mask,r20);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r20,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_sub_pd(rinv20,sh_ewald),velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq21,rcutoff2))
 +            {
 +
 +            r21              = _mm256_mul_pd(rsq21,rinv21);
 +            r21              = _mm256_andnot_pd(dummy_mask,r21);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r21,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_sub_pd(rinv21,sh_ewald),velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq21,rinv21),_mm256_sub_pd(rinvsq21,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq22,rcutoff2))
 +            {
 +
 +            r22              = _mm256_mul_pd(rsq22,rinv22);
 +            r22              = _mm256_andnot_pd(dummy_mask,r22);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r22,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_sub_pd(rinv22,sh_ewald),velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq22,rinv22),_mm256_sub_pd(rinvsq22,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
 +
 +            /* Inner loop uses 423 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 19 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W3W3_VF,outeriter*19 + inneriter*423);
 +}
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecEwSh_VdwNone_GeomW3W3_F_avx_256_double
 + * Electrostatics interaction: Ewald
 + * VdW interaction:            None
 + * Geometry:                   Water3-Water3
 + * Calculate force/pot:        Force
 + */
 +void
 +nb_kernel_ElecEwSh_VdwNone_GeomW3W3_F_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
 +    __m256d          jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
 +    int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
 +    __m256d          jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
 +    __m256d          dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
 +    __m256d          dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
 +    __m256d          dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    __m128i          ewitab;
 +    __m256d          ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
 +    __m256d          beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
 +    real             *ewtab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +
 +    sh_ewald         = _mm256_set1_pd(fr->ic->sh_ewald);
 +    beta             = _mm256_set1_pd(fr->ic->ewaldcoeff);
 +    beta2            = _mm256_mul_pd(beta,beta);
 +    beta3            = _mm256_mul_pd(beta,beta2);
 +
 +    ewtab            = fr->ic->tabq_coul_F;
 +    ewtabscale       = _mm256_set1_pd(fr->ic->tabq_scale);
 +    ewtabhalfspace   = _mm256_set1_pd(0.5/fr->ic->tabq_scale);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +
 +    jq0              = _mm256_set1_pd(charge[inr+0]);
 +    jq1              = _mm256_set1_pd(charge[inr+1]);
 +    jq2              = _mm256_set1_pd(charge[inr+2]);
 +    qq00             = _mm256_mul_pd(iq0,jq0);
 +    qq01             = _mm256_mul_pd(iq0,jq1);
 +    qq02             = _mm256_mul_pd(iq0,jq2);
 +    qq10             = _mm256_mul_pd(iq1,jq0);
 +    qq11             = _mm256_mul_pd(iq1,jq1);
 +    qq12             = _mm256_mul_pd(iq1,jq2);
 +    qq20             = _mm256_mul_pd(iq2,jq0);
 +    qq21             = _mm256_mul_pd(iq2,jq1);
 +    qq22             = _mm256_mul_pd(iq2,jq2);
 +
 +    /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
 +    rcutoff_scalar   = fr->rcoulomb;
 +    rcutoff          = _mm256_set1_pd(rcutoff_scalar);
 +    rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                              &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx01             = _mm256_sub_pd(ix0,jx1);
 +            dy01             = _mm256_sub_pd(iy0,jy1);
 +            dz01             = _mm256_sub_pd(iz0,jz1);
 +            dx02             = _mm256_sub_pd(ix0,jx2);
 +            dy02             = _mm256_sub_pd(iy0,jy2);
 +            dz02             = _mm256_sub_pd(iz0,jz2);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
 +            rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv01           = gmx_mm256_invsqrt_pd(rsq01);
 +            rinv02           = gmx_mm256_invsqrt_pd(rsq02);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq01         = _mm256_mul_pd(rinv01,rinv01);
 +            rinvsq02         = _mm256_mul_pd(rinv02,rinv02);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r00,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq01,rcutoff2))
 +            {
 +
 +            r01              = _mm256_mul_pd(rsq01,rinv01);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r01,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq01,rinv01),_mm256_sub_pd(rinvsq01,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq01,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx01);
 +            ty               = _mm256_mul_pd(fscal,dy01);
 +            tz               = _mm256_mul_pd(fscal,dz01);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq02,rcutoff2))
 +            {
 +
 +            r02              = _mm256_mul_pd(rsq02,rinv02);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r02,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq02,rinv02),_mm256_sub_pd(rinvsq02,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq02,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx02);
 +            ty               = _mm256_mul_pd(fscal,dy02);
 +            tz               = _mm256_mul_pd(fscal,dz02);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq10,rcutoff2))
 +            {
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r10,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq11,rcutoff2))
 +            {
 +
 +            r11              = _mm256_mul_pd(rsq11,rinv11);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r11,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq11,rinv11),_mm256_sub_pd(rinvsq11,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq12,rcutoff2))
 +            {
 +
 +            r12              = _mm256_mul_pd(rsq12,rinv12);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r12,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq12,rinv12),_mm256_sub_pd(rinvsq12,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq20,rcutoff2))
 +            {
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r20,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq21,rcutoff2))
 +            {
 +
 +            r21              = _mm256_mul_pd(rsq21,rinv21);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r21,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq21,rinv21),_mm256_sub_pd(rinvsq21,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq22,rcutoff2))
 +            {
 +
 +            r22              = _mm256_mul_pd(rsq22,rinv22);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r22,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq22,rinv22),_mm256_sub_pd(rinvsq22,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
 +
 +            /* Inner loop uses 351 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                              &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx01             = _mm256_sub_pd(ix0,jx1);
 +            dy01             = _mm256_sub_pd(iy0,jy1);
 +            dz01             = _mm256_sub_pd(iz0,jz1);
 +            dx02             = _mm256_sub_pd(ix0,jx2);
 +            dy02             = _mm256_sub_pd(iy0,jy2);
 +            dz02             = _mm256_sub_pd(iz0,jz2);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
 +            rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv01           = gmx_mm256_invsqrt_pd(rsq01);
 +            rinv02           = gmx_mm256_invsqrt_pd(rsq02);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq01         = _mm256_mul_pd(rinv01,rinv01);
 +            rinvsq02         = _mm256_mul_pd(rinv02,rinv02);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r00,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq01,rcutoff2))
 +            {
 +
 +            r01              = _mm256_mul_pd(rsq01,rinv01);
 +            r01              = _mm256_andnot_pd(dummy_mask,r01);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r01,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq01,rinv01),_mm256_sub_pd(rinvsq01,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq01,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx01);
 +            ty               = _mm256_mul_pd(fscal,dy01);
 +            tz               = _mm256_mul_pd(fscal,dz01);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq02,rcutoff2))
 +            {
 +
 +            r02              = _mm256_mul_pd(rsq02,rinv02);
 +            r02              = _mm256_andnot_pd(dummy_mask,r02);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r02,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq02,rinv02),_mm256_sub_pd(rinvsq02,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq02,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx02);
 +            ty               = _mm256_mul_pd(fscal,dy02);
 +            tz               = _mm256_mul_pd(fscal,dz02);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq10,rcutoff2))
 +            {
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +            r10              = _mm256_andnot_pd(dummy_mask,r10);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r10,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq11,rcutoff2))
 +            {
 +
 +            r11              = _mm256_mul_pd(rsq11,rinv11);
 +            r11              = _mm256_andnot_pd(dummy_mask,r11);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r11,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq11,rinv11),_mm256_sub_pd(rinvsq11,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq12,rcutoff2))
 +            {
 +
 +            r12              = _mm256_mul_pd(rsq12,rinv12);
 +            r12              = _mm256_andnot_pd(dummy_mask,r12);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r12,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq12,rinv12),_mm256_sub_pd(rinvsq12,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq20,rcutoff2))
 +            {
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +            r20              = _mm256_andnot_pd(dummy_mask,r20);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r20,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq21,rcutoff2))
 +            {
 +
 +            r21              = _mm256_mul_pd(rsq21,rinv21);
 +            r21              = _mm256_andnot_pd(dummy_mask,r21);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r21,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq21,rinv21),_mm256_sub_pd(rinvsq21,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq22,rcutoff2))
 +            {
 +
 +            r22              = _mm256_mul_pd(rsq22,rinv22);
 +            r22              = _mm256_andnot_pd(dummy_mask,r22);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r22,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq22,rinv22),_mm256_sub_pd(rinvsq22,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
 +
 +            /* Inner loop uses 360 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 18 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W3W3_F,outeriter*18 + inneriter*360);
 +}
index 31eb2b991d233f8aeaa6531001304de4001247d1,0000000000000000000000000000000000000000..a9d75d641a0af67a6ba9a90e509591970c57db3e
mode 100644,000000..100644
--- /dev/null
@@@ -1,1241 -1,0 +1,1241 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/*
 + * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2001-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the Free
 + * Software Foundation; either version 2 of the License, or (at your option) any
 + * later version.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website.
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecEwSh_VdwNone_GeomW4P1_VF_avx_256_double
 + * Electrostatics interaction: Ewald
 + * VdW interaction:            None
 + * Geometry:                   Water4-Particle
 + * Calculate force/pot:        PotentialAndForce
 + */
 +void
 +nb_kernel_ElecEwSh_VdwNone_GeomW4P1_VF_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    real *           vdwioffsetptr3;
 +    __m256d          ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          dx30,dy30,dz30,rsq30,rinv30,rinvsq30,r30,qq30,c6_30,c12_30;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    __m128i          ewitab;
 +    __m256d          ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
 +    __m256d          beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
 +    real             *ewtab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +
 +    sh_ewald         = _mm256_set1_pd(fr->ic->sh_ewald);
 +    beta             = _mm256_set1_pd(fr->ic->ewaldcoeff);
 +    beta2            = _mm256_mul_pd(beta,beta);
 +    beta3            = _mm256_mul_pd(beta,beta2);
 +
 +    ewtab            = fr->ic->tabq_coul_FDV0;
 +    ewtabscale       = _mm256_set1_pd(fr->ic->tabq_scale);
 +    ewtabhalfspace   = _mm256_set1_pd(0.5/fr->ic->tabq_scale);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    iq3              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+3]));
 +
 +    /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
 +    rcutoff_scalar   = fr->rcoulomb;
 +    rcutoff          = _mm256_set1_pd(rcutoff_scalar);
 +    rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset+DIM,
 +                                                    &ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
 +
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +        fix3             = _mm256_setzero_pd();
 +        fiy3             = _mm256_setzero_pd();
 +        fiz3             = _mm256_setzero_pd();
 +
 +        /* Reset potential sums */
 +        velecsum         = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx30             = _mm256_sub_pd(ix3,jx0);
 +            dy30             = _mm256_sub_pd(iy3,jy0);
 +            dz30             = _mm256_sub_pd(iz3,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq30            = gmx_mm256_calc_rsq_pd(dx30,dy30,dz30);
 +
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv30           = gmx_mm256_invsqrt_pd(rsq30);
 +
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq30         = _mm256_mul_pd(rinv30,rinv30);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq10,rcutoff2))
 +            {
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r10,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_sub_pd(rinv10,sh_ewald),velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq20,rcutoff2))
 +            {
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r20,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_sub_pd(rinv20,sh_ewald),velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq30,rcutoff2))
 +            {
 +
 +            r30              = _mm256_mul_pd(rsq30,rinv30);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq30             = _mm256_mul_pd(iq3,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r30,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq30,_mm256_sub_pd(_mm256_sub_pd(rinv30,sh_ewald),velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq30,rinv30),_mm256_sub_pd(rinvsq30,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq30,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx30);
 +            ty               = _mm256_mul_pd(fscal,dy30);
 +            tz               = _mm256_mul_pd(fscal,dz30);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 141 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx30             = _mm256_sub_pd(ix3,jx0);
 +            dy30             = _mm256_sub_pd(iy3,jy0);
 +            dz30             = _mm256_sub_pd(iz3,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq30            = gmx_mm256_calc_rsq_pd(dx30,dy30,dz30);
 +
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv30           = gmx_mm256_invsqrt_pd(rsq30);
 +
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq30         = _mm256_mul_pd(rinv30,rinv30);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq10,rcutoff2))
 +            {
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +            r10              = _mm256_andnot_pd(dummy_mask,r10);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r10,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_sub_pd(rinv10,sh_ewald),velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq20,rcutoff2))
 +            {
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +            r20              = _mm256_andnot_pd(dummy_mask,r20);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r20,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_sub_pd(rinv20,sh_ewald),velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq30,rcutoff2))
 +            {
 +
 +            r30              = _mm256_mul_pd(rsq30,rinv30);
 +            r30              = _mm256_andnot_pd(dummy_mask,r30);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq30             = _mm256_mul_pd(iq3,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r30,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq30,_mm256_sub_pd(_mm256_sub_pd(rinv30,sh_ewald),velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq30,rinv30),_mm256_sub_pd(rinvsq30,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq30,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx30);
 +            ty               = _mm256_mul_pd(fscal,dy30);
 +            tz               = _mm256_mul_pd(fscal,dz30);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 144 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
 +                                                 f+i_coord_offset+DIM,fshift+i_shift_offset);
 +
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 19 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W4_VF,outeriter*19 + inneriter*144);
 +}
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecEwSh_VdwNone_GeomW4P1_F_avx_256_double
 + * Electrostatics interaction: Ewald
 + * VdW interaction:            None
 + * Geometry:                   Water4-Particle
 + * Calculate force/pot:        Force
 + */
 +void
 +nb_kernel_ElecEwSh_VdwNone_GeomW4P1_F_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    real *           vdwioffsetptr3;
 +    __m256d          ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          dx30,dy30,dz30,rsq30,rinv30,rinvsq30,r30,qq30,c6_30,c12_30;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    __m128i          ewitab;
 +    __m256d          ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
 +    __m256d          beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
 +    real             *ewtab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +
 +    sh_ewald         = _mm256_set1_pd(fr->ic->sh_ewald);
 +    beta             = _mm256_set1_pd(fr->ic->ewaldcoeff);
 +    beta2            = _mm256_mul_pd(beta,beta);
 +    beta3            = _mm256_mul_pd(beta,beta2);
 +
 +    ewtab            = fr->ic->tabq_coul_F;
 +    ewtabscale       = _mm256_set1_pd(fr->ic->tabq_scale);
 +    ewtabhalfspace   = _mm256_set1_pd(0.5/fr->ic->tabq_scale);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    iq3              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+3]));
 +
 +    /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
 +    rcutoff_scalar   = fr->rcoulomb;
 +    rcutoff          = _mm256_set1_pd(rcutoff_scalar);
 +    rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset+DIM,
 +                                                    &ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
 +
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +        fix3             = _mm256_setzero_pd();
 +        fiy3             = _mm256_setzero_pd();
 +        fiz3             = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx30             = _mm256_sub_pd(ix3,jx0);
 +            dy30             = _mm256_sub_pd(iy3,jy0);
 +            dz30             = _mm256_sub_pd(iz3,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq30            = gmx_mm256_calc_rsq_pd(dx30,dy30,dz30);
 +
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv30           = gmx_mm256_invsqrt_pd(rsq30);
 +
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq30         = _mm256_mul_pd(rinv30,rinv30);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq10,rcutoff2))
 +            {
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r10,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq20,rcutoff2))
 +            {
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r20,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq30,rcutoff2))
 +            {
 +
 +            r30              = _mm256_mul_pd(rsq30,rinv30);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq30             = _mm256_mul_pd(iq3,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r30,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq30,rinv30),_mm256_sub_pd(rinvsq30,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq30,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx30);
 +            ty               = _mm256_mul_pd(fscal,dy30);
 +            tz               = _mm256_mul_pd(fscal,dz30);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 120 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx30             = _mm256_sub_pd(ix3,jx0);
 +            dy30             = _mm256_sub_pd(iy3,jy0);
 +            dz30             = _mm256_sub_pd(iz3,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq30            = gmx_mm256_calc_rsq_pd(dx30,dy30,dz30);
 +
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv30           = gmx_mm256_invsqrt_pd(rsq30);
 +
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq30         = _mm256_mul_pd(rinv30,rinv30);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq10,rcutoff2))
 +            {
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +            r10              = _mm256_andnot_pd(dummy_mask,r10);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r10,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq20,rcutoff2))
 +            {
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +            r20              = _mm256_andnot_pd(dummy_mask,r20);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r20,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq30,rcutoff2))
 +            {
 +
 +            r30              = _mm256_mul_pd(rsq30,rinv30);
 +            r30              = _mm256_andnot_pd(dummy_mask,r30);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq30             = _mm256_mul_pd(iq3,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r30,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq30,rinv30),_mm256_sub_pd(rinvsq30,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq30,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx30);
 +            ty               = _mm256_mul_pd(fscal,dy30);
 +            tz               = _mm256_mul_pd(fscal,dz30);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 123 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
 +                                                 f+i_coord_offset+DIM,fshift+i_shift_offset);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 18 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W4_F,outeriter*18 + inneriter*123);
 +}
index 286bdf8e55d9badb1b0e3e3cd6c8f31c590b8463,0000000000000000000000000000000000000000..b839694452f8ba780f7e33fb5f4ebff96280158f
mode 100644,000000..100644
--- /dev/null
@@@ -1,2589 -1,0 +1,2589 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/*
 + * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2001-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the Free
 + * Software Foundation; either version 2 of the License, or (at your option) any
 + * later version.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website.
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecEwSh_VdwNone_GeomW4W4_VF_avx_256_double
 + * Electrostatics interaction: Ewald
 + * VdW interaction:            None
 + * Geometry:                   Water4-Water4
 + * Calculate force/pot:        PotentialAndForce
 + */
 +void
 +nb_kernel_ElecEwSh_VdwNone_GeomW4W4_VF_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    real *           vdwioffsetptr3;
 +    __m256d          ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
 +    int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
 +    __m256d          jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
 +    int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
 +    __m256d          jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
 +    int              vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D;
 +    __m256d          jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
 +    __m256d          dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
 +    __m256d          dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
 +    __m256d          dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
 +    __m256d          dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
 +    __m256d          dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
 +    __m256d          dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
 +    __m256d          dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
 +    __m256d          dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
 +    __m256d          dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    __m128i          ewitab;
 +    __m256d          ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
 +    __m256d          beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
 +    real             *ewtab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +
 +    sh_ewald         = _mm256_set1_pd(fr->ic->sh_ewald);
 +    beta             = _mm256_set1_pd(fr->ic->ewaldcoeff);
 +    beta2            = _mm256_mul_pd(beta,beta);
 +    beta3            = _mm256_mul_pd(beta,beta2);
 +
 +    ewtab            = fr->ic->tabq_coul_FDV0;
 +    ewtabscale       = _mm256_set1_pd(fr->ic->tabq_scale);
 +    ewtabhalfspace   = _mm256_set1_pd(0.5/fr->ic->tabq_scale);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    iq3              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+3]));
 +
 +    jq1              = _mm256_set1_pd(charge[inr+1]);
 +    jq2              = _mm256_set1_pd(charge[inr+2]);
 +    jq3              = _mm256_set1_pd(charge[inr+3]);
 +    qq11             = _mm256_mul_pd(iq1,jq1);
 +    qq12             = _mm256_mul_pd(iq1,jq2);
 +    qq13             = _mm256_mul_pd(iq1,jq3);
 +    qq21             = _mm256_mul_pd(iq2,jq1);
 +    qq22             = _mm256_mul_pd(iq2,jq2);
 +    qq23             = _mm256_mul_pd(iq2,jq3);
 +    qq31             = _mm256_mul_pd(iq3,jq1);
 +    qq32             = _mm256_mul_pd(iq3,jq2);
 +    qq33             = _mm256_mul_pd(iq3,jq3);
 +
 +    /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
 +    rcutoff_scalar   = fr->rcoulomb;
 +    rcutoff          = _mm256_set1_pd(rcutoff_scalar);
 +    rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset+DIM,
 +                                                    &ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
 +
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +        fix3             = _mm256_setzero_pd();
 +        fiy3             = _mm256_setzero_pd();
 +        fiz3             = _mm256_setzero_pd();
 +
 +        /* Reset potential sums */
 +        velecsum         = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA+DIM,x+j_coord_offsetB+DIM,
 +                                                 x+j_coord_offsetC+DIM,x+j_coord_offsetD+DIM,
 +                                                 &jx1,&jy1,&jz1,&jx2,&jy2,&jz2,&jx3,&jy3,&jz3);
 +
 +            /* Calculate displacement vector */
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx13             = _mm256_sub_pd(ix1,jx3);
 +            dy13             = _mm256_sub_pd(iy1,jy3);
 +            dz13             = _mm256_sub_pd(iz1,jz3);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +            dx23             = _mm256_sub_pd(ix2,jx3);
 +            dy23             = _mm256_sub_pd(iy2,jy3);
 +            dz23             = _mm256_sub_pd(iz2,jz3);
 +            dx31             = _mm256_sub_pd(ix3,jx1);
 +            dy31             = _mm256_sub_pd(iy3,jy1);
 +            dz31             = _mm256_sub_pd(iz3,jz1);
 +            dx32             = _mm256_sub_pd(ix3,jx2);
 +            dy32             = _mm256_sub_pd(iy3,jy2);
 +            dz32             = _mm256_sub_pd(iz3,jz2);
 +            dx33             = _mm256_sub_pd(ix3,jx3);
 +            dy33             = _mm256_sub_pd(iy3,jy3);
 +            dz33             = _mm256_sub_pd(iz3,jz3);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq13            = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +            rsq23            = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
 +            rsq31            = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
 +            rsq32            = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
 +            rsq33            = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);
 +
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv13           = gmx_mm256_invsqrt_pd(rsq13);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +            rinv23           = gmx_mm256_invsqrt_pd(rsq23);
 +            rinv31           = gmx_mm256_invsqrt_pd(rsq31);
 +            rinv32           = gmx_mm256_invsqrt_pd(rsq32);
 +            rinv33           = gmx_mm256_invsqrt_pd(rsq33);
 +
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq13         = _mm256_mul_pd(rinv13,rinv13);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +            rinvsq23         = _mm256_mul_pd(rinv23,rinv23);
 +            rinvsq31         = _mm256_mul_pd(rinv31,rinv31);
 +            rinvsq32         = _mm256_mul_pd(rinv32,rinv32);
 +            rinvsq33         = _mm256_mul_pd(rinv33,rinv33);
 +
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +            fjx3             = _mm256_setzero_pd();
 +            fjy3             = _mm256_setzero_pd();
 +            fjz3             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq11,rcutoff2))
 +            {
 +
 +            r11              = _mm256_mul_pd(rsq11,rinv11);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r11,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_sub_pd(rinv11,sh_ewald),velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq11,rinv11),_mm256_sub_pd(rinvsq11,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq12,rcutoff2))
 +            {
 +
 +            r12              = _mm256_mul_pd(rsq12,rinv12);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r12,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_sub_pd(rinv12,sh_ewald),velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq12,rinv12),_mm256_sub_pd(rinvsq12,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq13,rcutoff2))
 +            {
 +
 +            r13              = _mm256_mul_pd(rsq13,rinv13);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r13,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq13,_mm256_sub_pd(_mm256_sub_pd(rinv13,sh_ewald),velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq13,rinv13),_mm256_sub_pd(rinvsq13,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq13,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx13);
 +            ty               = _mm256_mul_pd(fscal,dy13);
 +            tz               = _mm256_mul_pd(fscal,dz13);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq21,rcutoff2))
 +            {
 +
 +            r21              = _mm256_mul_pd(rsq21,rinv21);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r21,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_sub_pd(rinv21,sh_ewald),velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq21,rinv21),_mm256_sub_pd(rinvsq21,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq22,rcutoff2))
 +            {
 +
 +            r22              = _mm256_mul_pd(rsq22,rinv22);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r22,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_sub_pd(rinv22,sh_ewald),velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq22,rinv22),_mm256_sub_pd(rinvsq22,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq23,rcutoff2))
 +            {
 +
 +            r23              = _mm256_mul_pd(rsq23,rinv23);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r23,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq23,_mm256_sub_pd(_mm256_sub_pd(rinv23,sh_ewald),velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq23,rinv23),_mm256_sub_pd(rinvsq23,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq23,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx23);
 +            ty               = _mm256_mul_pd(fscal,dy23);
 +            tz               = _mm256_mul_pd(fscal,dz23);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq31,rcutoff2))
 +            {
 +
 +            r31              = _mm256_mul_pd(rsq31,rinv31);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r31,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq31,_mm256_sub_pd(_mm256_sub_pd(rinv31,sh_ewald),velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq31,rinv31),_mm256_sub_pd(rinvsq31,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq31,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx31);
 +            ty               = _mm256_mul_pd(fscal,dy31);
 +            tz               = _mm256_mul_pd(fscal,dz31);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq32,rcutoff2))
 +            {
 +
 +            r32              = _mm256_mul_pd(rsq32,rinv32);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r32,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq32,_mm256_sub_pd(_mm256_sub_pd(rinv32,sh_ewald),velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq32,rinv32),_mm256_sub_pd(rinvsq32,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq32,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx32);
 +            ty               = _mm256_mul_pd(fscal,dy32);
 +            tz               = _mm256_mul_pd(fscal,dz32);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq33,rcutoff2))
 +            {
 +
 +            r33              = _mm256_mul_pd(rsq33,rinv33);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r33,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq33,_mm256_sub_pd(_mm256_sub_pd(rinv33,sh_ewald),velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq33,rinv33),_mm256_sub_pd(rinvsq33,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq33,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx33);
 +            ty               = _mm256_mul_pd(fscal,dy33);
 +            tz               = _mm256_mul_pd(fscal,dz33);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            }
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA+DIM,fjptrB+DIM,fjptrC+DIM,fjptrD+DIM,
 +                                                      fjx1,fjy1,fjz1,fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
 +
 +            /* Inner loop uses 414 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA+DIM,x+j_coord_offsetB+DIM,
 +                                                 x+j_coord_offsetC+DIM,x+j_coord_offsetD+DIM,
 +                                                 &jx1,&jy1,&jz1,&jx2,&jy2,&jz2,&jx3,&jy3,&jz3);
 +
 +            /* Calculate displacement vector */
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx13             = _mm256_sub_pd(ix1,jx3);
 +            dy13             = _mm256_sub_pd(iy1,jy3);
 +            dz13             = _mm256_sub_pd(iz1,jz3);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +            dx23             = _mm256_sub_pd(ix2,jx3);
 +            dy23             = _mm256_sub_pd(iy2,jy3);
 +            dz23             = _mm256_sub_pd(iz2,jz3);
 +            dx31             = _mm256_sub_pd(ix3,jx1);
 +            dy31             = _mm256_sub_pd(iy3,jy1);
 +            dz31             = _mm256_sub_pd(iz3,jz1);
 +            dx32             = _mm256_sub_pd(ix3,jx2);
 +            dy32             = _mm256_sub_pd(iy3,jy2);
 +            dz32             = _mm256_sub_pd(iz3,jz2);
 +            dx33             = _mm256_sub_pd(ix3,jx3);
 +            dy33             = _mm256_sub_pd(iy3,jy3);
 +            dz33             = _mm256_sub_pd(iz3,jz3);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq13            = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +            rsq23            = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
 +            rsq31            = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
 +            rsq32            = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
 +            rsq33            = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);
 +
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv13           = gmx_mm256_invsqrt_pd(rsq13);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +            rinv23           = gmx_mm256_invsqrt_pd(rsq23);
 +            rinv31           = gmx_mm256_invsqrt_pd(rsq31);
 +            rinv32           = gmx_mm256_invsqrt_pd(rsq32);
 +            rinv33           = gmx_mm256_invsqrt_pd(rsq33);
 +
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq13         = _mm256_mul_pd(rinv13,rinv13);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +            rinvsq23         = _mm256_mul_pd(rinv23,rinv23);
 +            rinvsq31         = _mm256_mul_pd(rinv31,rinv31);
 +            rinvsq32         = _mm256_mul_pd(rinv32,rinv32);
 +            rinvsq33         = _mm256_mul_pd(rinv33,rinv33);
 +
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +            fjx3             = _mm256_setzero_pd();
 +            fjy3             = _mm256_setzero_pd();
 +            fjz3             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq11,rcutoff2))
 +            {
 +
 +            r11              = _mm256_mul_pd(rsq11,rinv11);
 +            r11              = _mm256_andnot_pd(dummy_mask,r11);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r11,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_sub_pd(rinv11,sh_ewald),velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq11,rinv11),_mm256_sub_pd(rinvsq11,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq12,rcutoff2))
 +            {
 +
 +            r12              = _mm256_mul_pd(rsq12,rinv12);
 +            r12              = _mm256_andnot_pd(dummy_mask,r12);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r12,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_sub_pd(rinv12,sh_ewald),velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq12,rinv12),_mm256_sub_pd(rinvsq12,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq13,rcutoff2))
 +            {
 +
 +            r13              = _mm256_mul_pd(rsq13,rinv13);
 +            r13              = _mm256_andnot_pd(dummy_mask,r13);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r13,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq13,_mm256_sub_pd(_mm256_sub_pd(rinv13,sh_ewald),velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq13,rinv13),_mm256_sub_pd(rinvsq13,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq13,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx13);
 +            ty               = _mm256_mul_pd(fscal,dy13);
 +            tz               = _mm256_mul_pd(fscal,dz13);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq21,rcutoff2))
 +            {
 +
 +            r21              = _mm256_mul_pd(rsq21,rinv21);
 +            r21              = _mm256_andnot_pd(dummy_mask,r21);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r21,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_sub_pd(rinv21,sh_ewald),velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq21,rinv21),_mm256_sub_pd(rinvsq21,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq22,rcutoff2))
 +            {
 +
 +            r22              = _mm256_mul_pd(rsq22,rinv22);
 +            r22              = _mm256_andnot_pd(dummy_mask,r22);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r22,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_sub_pd(rinv22,sh_ewald),velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq22,rinv22),_mm256_sub_pd(rinvsq22,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq23,rcutoff2))
 +            {
 +
 +            r23              = _mm256_mul_pd(rsq23,rinv23);
 +            r23              = _mm256_andnot_pd(dummy_mask,r23);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r23,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq23,_mm256_sub_pd(_mm256_sub_pd(rinv23,sh_ewald),velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq23,rinv23),_mm256_sub_pd(rinvsq23,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq23,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx23);
 +            ty               = _mm256_mul_pd(fscal,dy23);
 +            tz               = _mm256_mul_pd(fscal,dz23);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq31,rcutoff2))
 +            {
 +
 +            r31              = _mm256_mul_pd(rsq31,rinv31);
 +            r31              = _mm256_andnot_pd(dummy_mask,r31);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r31,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq31,_mm256_sub_pd(_mm256_sub_pd(rinv31,sh_ewald),velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq31,rinv31),_mm256_sub_pd(rinvsq31,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq31,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx31);
 +            ty               = _mm256_mul_pd(fscal,dy31);
 +            tz               = _mm256_mul_pd(fscal,dz31);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq32,rcutoff2))
 +            {
 +
 +            r32              = _mm256_mul_pd(rsq32,rinv32);
 +            r32              = _mm256_andnot_pd(dummy_mask,r32);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r32,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq32,_mm256_sub_pd(_mm256_sub_pd(rinv32,sh_ewald),velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq32,rinv32),_mm256_sub_pd(rinvsq32,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq32,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx32);
 +            ty               = _mm256_mul_pd(fscal,dy32);
 +            tz               = _mm256_mul_pd(fscal,dz32);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq33,rcutoff2))
 +            {
 +
 +            r33              = _mm256_mul_pd(rsq33,rinv33);
 +            r33              = _mm256_andnot_pd(dummy_mask,r33);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r33,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq33,_mm256_sub_pd(_mm256_sub_pd(rinv33,sh_ewald),velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq33,rinv33),_mm256_sub_pd(rinvsq33,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq33,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx33);
 +            ty               = _mm256_mul_pd(fscal,dy33);
 +            tz               = _mm256_mul_pd(fscal,dz33);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            }
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA+DIM,fjptrB+DIM,fjptrC+DIM,fjptrD+DIM,
 +                                                      fjx1,fjy1,fjz1,fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
 +
 +            /* Inner loop uses 423 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
 +                                                 f+i_coord_offset+DIM,fshift+i_shift_offset);
 +
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 19 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W4W4_VF,outeriter*19 + inneriter*423);
 +}
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecEwSh_VdwNone_GeomW4W4_F_avx_256_double
 + * Electrostatics interaction: Ewald
 + * VdW interaction:            None
 + * Geometry:                   Water4-Water4
 + * Calculate force/pot:        Force
 + */
 +void
 +nb_kernel_ElecEwSh_VdwNone_GeomW4W4_F_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    real *           vdwioffsetptr3;
 +    __m256d          ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
 +    int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
 +    __m256d          jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
 +    int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
 +    __m256d          jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
 +    int              vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D;
 +    __m256d          jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
 +    __m256d          dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
 +    __m256d          dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
 +    __m256d          dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
 +    __m256d          dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
 +    __m256d          dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
 +    __m256d          dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
 +    __m256d          dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
 +    __m256d          dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
 +    __m256d          dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    __m128i          ewitab;
 +    __m256d          ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
 +    __m256d          beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
 +    real             *ewtab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +
 +    sh_ewald         = _mm256_set1_pd(fr->ic->sh_ewald);
 +    beta             = _mm256_set1_pd(fr->ic->ewaldcoeff);
 +    beta2            = _mm256_mul_pd(beta,beta);
 +    beta3            = _mm256_mul_pd(beta,beta2);
 +
 +    ewtab            = fr->ic->tabq_coul_F;
 +    ewtabscale       = _mm256_set1_pd(fr->ic->tabq_scale);
 +    ewtabhalfspace   = _mm256_set1_pd(0.5/fr->ic->tabq_scale);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    iq3              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+3]));
 +
 +    jq1              = _mm256_set1_pd(charge[inr+1]);
 +    jq2              = _mm256_set1_pd(charge[inr+2]);
 +    jq3              = _mm256_set1_pd(charge[inr+3]);
 +    qq11             = _mm256_mul_pd(iq1,jq1);
 +    qq12             = _mm256_mul_pd(iq1,jq2);
 +    qq13             = _mm256_mul_pd(iq1,jq3);
 +    qq21             = _mm256_mul_pd(iq2,jq1);
 +    qq22             = _mm256_mul_pd(iq2,jq2);
 +    qq23             = _mm256_mul_pd(iq2,jq3);
 +    qq31             = _mm256_mul_pd(iq3,jq1);
 +    qq32             = _mm256_mul_pd(iq3,jq2);
 +    qq33             = _mm256_mul_pd(iq3,jq3);
 +
 +    /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
 +    rcutoff_scalar   = fr->rcoulomb;
 +    rcutoff          = _mm256_set1_pd(rcutoff_scalar);
 +    rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset+DIM,
 +                                                    &ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
 +
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +        fix3             = _mm256_setzero_pd();
 +        fiy3             = _mm256_setzero_pd();
 +        fiz3             = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA+DIM,x+j_coord_offsetB+DIM,
 +                                                 x+j_coord_offsetC+DIM,x+j_coord_offsetD+DIM,
 +                                                 &jx1,&jy1,&jz1,&jx2,&jy2,&jz2,&jx3,&jy3,&jz3);
 +
 +            /* Calculate displacement vector */
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx13             = _mm256_sub_pd(ix1,jx3);
 +            dy13             = _mm256_sub_pd(iy1,jy3);
 +            dz13             = _mm256_sub_pd(iz1,jz3);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +            dx23             = _mm256_sub_pd(ix2,jx3);
 +            dy23             = _mm256_sub_pd(iy2,jy3);
 +            dz23             = _mm256_sub_pd(iz2,jz3);
 +            dx31             = _mm256_sub_pd(ix3,jx1);
 +            dy31             = _mm256_sub_pd(iy3,jy1);
 +            dz31             = _mm256_sub_pd(iz3,jz1);
 +            dx32             = _mm256_sub_pd(ix3,jx2);
 +            dy32             = _mm256_sub_pd(iy3,jy2);
 +            dz32             = _mm256_sub_pd(iz3,jz2);
 +            dx33             = _mm256_sub_pd(ix3,jx3);
 +            dy33             = _mm256_sub_pd(iy3,jy3);
 +            dz33             = _mm256_sub_pd(iz3,jz3);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq13            = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +            rsq23            = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
 +            rsq31            = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
 +            rsq32            = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
 +            rsq33            = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);
 +
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv13           = gmx_mm256_invsqrt_pd(rsq13);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +            rinv23           = gmx_mm256_invsqrt_pd(rsq23);
 +            rinv31           = gmx_mm256_invsqrt_pd(rsq31);
 +            rinv32           = gmx_mm256_invsqrt_pd(rsq32);
 +            rinv33           = gmx_mm256_invsqrt_pd(rsq33);
 +
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq13         = _mm256_mul_pd(rinv13,rinv13);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +            rinvsq23         = _mm256_mul_pd(rinv23,rinv23);
 +            rinvsq31         = _mm256_mul_pd(rinv31,rinv31);
 +            rinvsq32         = _mm256_mul_pd(rinv32,rinv32);
 +            rinvsq33         = _mm256_mul_pd(rinv33,rinv33);
 +
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +            fjx3             = _mm256_setzero_pd();
 +            fjy3             = _mm256_setzero_pd();
 +            fjz3             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq11,rcutoff2))
 +            {
 +
 +            r11              = _mm256_mul_pd(rsq11,rinv11);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r11,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq11,rinv11),_mm256_sub_pd(rinvsq11,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq12,rcutoff2))
 +            {
 +
 +            r12              = _mm256_mul_pd(rsq12,rinv12);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r12,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq12,rinv12),_mm256_sub_pd(rinvsq12,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq13,rcutoff2))
 +            {
 +
 +            r13              = _mm256_mul_pd(rsq13,rinv13);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r13,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq13,rinv13),_mm256_sub_pd(rinvsq13,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq13,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx13);
 +            ty               = _mm256_mul_pd(fscal,dy13);
 +            tz               = _mm256_mul_pd(fscal,dz13);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq21,rcutoff2))
 +            {
 +
 +            r21              = _mm256_mul_pd(rsq21,rinv21);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r21,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq21,rinv21),_mm256_sub_pd(rinvsq21,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq22,rcutoff2))
 +            {
 +
 +            r22              = _mm256_mul_pd(rsq22,rinv22);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r22,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq22,rinv22),_mm256_sub_pd(rinvsq22,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq23,rcutoff2))
 +            {
 +
 +            r23              = _mm256_mul_pd(rsq23,rinv23);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r23,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq23,rinv23),_mm256_sub_pd(rinvsq23,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq23,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx23);
 +            ty               = _mm256_mul_pd(fscal,dy23);
 +            tz               = _mm256_mul_pd(fscal,dz23);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq31,rcutoff2))
 +            {
 +
 +            r31              = _mm256_mul_pd(rsq31,rinv31);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r31,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq31,rinv31),_mm256_sub_pd(rinvsq31,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq31,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx31);
 +            ty               = _mm256_mul_pd(fscal,dy31);
 +            tz               = _mm256_mul_pd(fscal,dz31);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq32,rcutoff2))
 +            {
 +
 +            r32              = _mm256_mul_pd(rsq32,rinv32);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r32,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq32,rinv32),_mm256_sub_pd(rinvsq32,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq32,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx32);
 +            ty               = _mm256_mul_pd(fscal,dy32);
 +            tz               = _mm256_mul_pd(fscal,dz32);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq33,rcutoff2))
 +            {
 +
 +            r33              = _mm256_mul_pd(rsq33,rinv33);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r33,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq33,rinv33),_mm256_sub_pd(rinvsq33,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq33,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx33);
 +            ty               = _mm256_mul_pd(fscal,dy33);
 +            tz               = _mm256_mul_pd(fscal,dz33);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            }
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA+DIM,fjptrB+DIM,fjptrC+DIM,fjptrD+DIM,
 +                                                      fjx1,fjy1,fjz1,fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
 +
 +            /* Inner loop uses 351 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA+DIM,x+j_coord_offsetB+DIM,
 +                                                 x+j_coord_offsetC+DIM,x+j_coord_offsetD+DIM,
 +                                                 &jx1,&jy1,&jz1,&jx2,&jy2,&jz2,&jx3,&jy3,&jz3);
 +
 +            /* Calculate displacement vector */
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx13             = _mm256_sub_pd(ix1,jx3);
 +            dy13             = _mm256_sub_pd(iy1,jy3);
 +            dz13             = _mm256_sub_pd(iz1,jz3);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +            dx23             = _mm256_sub_pd(ix2,jx3);
 +            dy23             = _mm256_sub_pd(iy2,jy3);
 +            dz23             = _mm256_sub_pd(iz2,jz3);
 +            dx31             = _mm256_sub_pd(ix3,jx1);
 +            dy31             = _mm256_sub_pd(iy3,jy1);
 +            dz31             = _mm256_sub_pd(iz3,jz1);
 +            dx32             = _mm256_sub_pd(ix3,jx2);
 +            dy32             = _mm256_sub_pd(iy3,jy2);
 +            dz32             = _mm256_sub_pd(iz3,jz2);
 +            dx33             = _mm256_sub_pd(ix3,jx3);
 +            dy33             = _mm256_sub_pd(iy3,jy3);
 +            dz33             = _mm256_sub_pd(iz3,jz3);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq13            = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +            rsq23            = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
 +            rsq31            = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
 +            rsq32            = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
 +            rsq33            = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);
 +
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv13           = gmx_mm256_invsqrt_pd(rsq13);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +            rinv23           = gmx_mm256_invsqrt_pd(rsq23);
 +            rinv31           = gmx_mm256_invsqrt_pd(rsq31);
 +            rinv32           = gmx_mm256_invsqrt_pd(rsq32);
 +            rinv33           = gmx_mm256_invsqrt_pd(rsq33);
 +
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq13         = _mm256_mul_pd(rinv13,rinv13);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +            rinvsq23         = _mm256_mul_pd(rinv23,rinv23);
 +            rinvsq31         = _mm256_mul_pd(rinv31,rinv31);
 +            rinvsq32         = _mm256_mul_pd(rinv32,rinv32);
 +            rinvsq33         = _mm256_mul_pd(rinv33,rinv33);
 +
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +            fjx3             = _mm256_setzero_pd();
 +            fjy3             = _mm256_setzero_pd();
 +            fjz3             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq11,rcutoff2))
 +            {
 +
 +            r11              = _mm256_mul_pd(rsq11,rinv11);
 +            r11              = _mm256_andnot_pd(dummy_mask,r11);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r11,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq11,rinv11),_mm256_sub_pd(rinvsq11,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq12,rcutoff2))
 +            {
 +
 +            r12              = _mm256_mul_pd(rsq12,rinv12);
 +            r12              = _mm256_andnot_pd(dummy_mask,r12);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r12,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq12,rinv12),_mm256_sub_pd(rinvsq12,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq13,rcutoff2))
 +            {
 +
 +            r13              = _mm256_mul_pd(rsq13,rinv13);
 +            r13              = _mm256_andnot_pd(dummy_mask,r13);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r13,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq13,rinv13),_mm256_sub_pd(rinvsq13,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq13,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx13);
 +            ty               = _mm256_mul_pd(fscal,dy13);
 +            tz               = _mm256_mul_pd(fscal,dz13);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq21,rcutoff2))
 +            {
 +
 +            r21              = _mm256_mul_pd(rsq21,rinv21);
 +            r21              = _mm256_andnot_pd(dummy_mask,r21);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r21,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq21,rinv21),_mm256_sub_pd(rinvsq21,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq22,rcutoff2))
 +            {
 +
 +            r22              = _mm256_mul_pd(rsq22,rinv22);
 +            r22              = _mm256_andnot_pd(dummy_mask,r22);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r22,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq22,rinv22),_mm256_sub_pd(rinvsq22,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq23,rcutoff2))
 +            {
 +
 +            r23              = _mm256_mul_pd(rsq23,rinv23);
 +            r23              = _mm256_andnot_pd(dummy_mask,r23);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r23,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq23,rinv23),_mm256_sub_pd(rinvsq23,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq23,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx23);
 +            ty               = _mm256_mul_pd(fscal,dy23);
 +            tz               = _mm256_mul_pd(fscal,dz23);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq31,rcutoff2))
 +            {
 +
 +            r31              = _mm256_mul_pd(rsq31,rinv31);
 +            r31              = _mm256_andnot_pd(dummy_mask,r31);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r31,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq31,rinv31),_mm256_sub_pd(rinvsq31,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq31,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx31);
 +            ty               = _mm256_mul_pd(fscal,dy31);
 +            tz               = _mm256_mul_pd(fscal,dz31);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq32,rcutoff2))
 +            {
 +
 +            r32              = _mm256_mul_pd(rsq32,rinv32);
 +            r32              = _mm256_andnot_pd(dummy_mask,r32);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r32,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq32,rinv32),_mm256_sub_pd(rinvsq32,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq32,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx32);
 +            ty               = _mm256_mul_pd(fscal,dy32);
 +            tz               = _mm256_mul_pd(fscal,dz32);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq33,rcutoff2))
 +            {
 +
 +            r33              = _mm256_mul_pd(rsq33,rinv33);
 +            r33              = _mm256_andnot_pd(dummy_mask,r33);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r33,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq33,rinv33),_mm256_sub_pd(rinvsq33,felec));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq33,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx33);
 +            ty               = _mm256_mul_pd(fscal,dy33);
 +            tz               = _mm256_mul_pd(fscal,dz33);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            }
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA+DIM,fjptrB+DIM,fjptrC+DIM,fjptrD+DIM,
 +                                                      fjx1,fjy1,fjz1,fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
 +
 +            /* Inner loop uses 360 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
 +                                                 f+i_coord_offset+DIM,fshift+i_shift_offset);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 18 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W4W4_F,outeriter*18 + inneriter*360);
 +}
index 41afd21a60f8099c38981aaa780f0528d5d44f9c,0000000000000000000000000000000000000000..136b320ac703b918ef51effcbe92bf77c543b81f
mode 100644,000000..100644
--- /dev/null
@@@ -1,888 -1,0 +1,888 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/*
 + * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2001-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the Free
 + * Software Foundation; either version 2 of the License, or (at your option) any
 + * later version.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website.
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecEwSw_VdwLJSw_GeomP1P1_VF_avx_256_double
 + * Electrostatics interaction: Ewald
 + * VdW interaction:            LennardJones
 + * Geometry:                   Particle-Particle
 + * Calculate force/pot:        PotentialAndForce
 + */
 +void
 +nb_kernel_ElecEwSw_VdwLJSw_GeomP1P1_VF_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m128i          ewitab;
 +    __m256d          ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
 +    __m256d          beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
 +    real             *ewtab;
 +    __m256d          rswitch,swV3,swV4,swV5,swF2,swF3,swF4,d,d2,sw,dsw;
 +    real             rswitch_scalar,d_scalar;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    sh_ewald         = _mm256_set1_pd(fr->ic->sh_ewald);
 +    beta             = _mm256_set1_pd(fr->ic->ewaldcoeff);
 +    beta2            = _mm256_mul_pd(beta,beta);
 +    beta3            = _mm256_mul_pd(beta,beta2);
 +
 +    ewtab            = fr->ic->tabq_coul_FDV0;
 +    ewtabscale       = _mm256_set1_pd(fr->ic->tabq_scale);
 +    ewtabhalfspace   = _mm256_set1_pd(0.5/fr->ic->tabq_scale);
 +
 +    /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
 +    rcutoff_scalar   = fr->rcoulomb;
 +    rcutoff          = _mm256_set1_pd(rcutoff_scalar);
 +    rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);
 +
 +    rswitch_scalar   = fr->rcoulomb_switch;
 +    rswitch          = _mm256_set1_pd(rswitch_scalar);
 +    /* Setup switch parameters */
 +    d_scalar         = rcutoff_scalar-rswitch_scalar;
 +    d                = _mm256_set1_pd(d_scalar);
 +    swV3             = _mm256_set1_pd(-10.0/(d_scalar*d_scalar*d_scalar));
 +    swV4             = _mm256_set1_pd( 15.0/(d_scalar*d_scalar*d_scalar*d_scalar));
 +    swV5             = _mm256_set1_pd( -6.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
 +    swF2             = _mm256_set1_pd(-30.0/(d_scalar*d_scalar*d_scalar));
 +    swF3             = _mm256_set1_pd( 60.0/(d_scalar*d_scalar*d_scalar*d_scalar));
 +    swF4             = _mm256_set1_pd(-30.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_1rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +
 +        /* Load parameters for i particles */
 +        iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +        vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +        /* Reset potential sums */
 +        velecsum         = _mm256_setzero_pd();
 +        vvdwsum          = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r00,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(rinv00,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            d                = _mm256_sub_pd(r00,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv00,_mm256_mul_pd(velec,dsw)) );
 +            fvdw             = _mm256_sub_pd( _mm256_mul_pd(fvdw,sw) , _mm256_mul_pd(rinv00,_mm256_mul_pd(vvdw,dsw)) );
 +            velec            = _mm256_mul_pd(velec,sw);
 +            vvdw             = _mm256_mul_pd(vvdw,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +            vvdw             = _mm256_and_pd(vvdw,cutoff_mask);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
 +
 +            }
 +
 +            /* Inner loop uses 83 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r00,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(rinv00,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            d                = _mm256_sub_pd(r00,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv00,_mm256_mul_pd(velec,dsw)) );
 +            fvdw             = _mm256_sub_pd( _mm256_mul_pd(fvdw,sw) , _mm256_mul_pd(rinv00,_mm256_mul_pd(vvdw,dsw)) );
 +            velec            = _mm256_mul_pd(velec,sw);
 +            vvdw             = _mm256_mul_pd(vvdw,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +            vvdw             = _mm256_and_pd(vvdw,cutoff_mask);
 +            vvdw             = _mm256_andnot_pd(dummy_mask,vvdw);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
 +
 +            }
 +
 +            /* Inner loop uses 84 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_1atom_swizzle_pd(fix0,fiy0,fiz0,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
 +        gmx_mm256_update_1pot_pd(vvdwsum,kernel_data->energygrp_vdw+ggid);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 9 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_VF,outeriter*9 + inneriter*84);
 +}
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecEwSw_VdwLJSw_GeomP1P1_F_avx_256_double
 + * Electrostatics interaction: Ewald
 + * VdW interaction:            LennardJones
 + * Geometry:                   Particle-Particle
 + * Calculate force/pot:        Force
 + */
 +void
 +nb_kernel_ElecEwSw_VdwLJSw_GeomP1P1_F_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m128i          ewitab;
 +    __m256d          ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
 +    __m256d          beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
 +    real             *ewtab;
 +    __m256d          rswitch,swV3,swV4,swV5,swF2,swF3,swF4,d,d2,sw,dsw;
 +    real             rswitch_scalar,d_scalar;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    sh_ewald         = _mm256_set1_pd(fr->ic->sh_ewald);
 +    beta             = _mm256_set1_pd(fr->ic->ewaldcoeff);
 +    beta2            = _mm256_mul_pd(beta,beta);
 +    beta3            = _mm256_mul_pd(beta,beta2);
 +
 +    ewtab            = fr->ic->tabq_coul_FDV0;
 +    ewtabscale       = _mm256_set1_pd(fr->ic->tabq_scale);
 +    ewtabhalfspace   = _mm256_set1_pd(0.5/fr->ic->tabq_scale);
 +
 +    /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
 +    rcutoff_scalar   = fr->rcoulomb;
 +    rcutoff          = _mm256_set1_pd(rcutoff_scalar);
 +    rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);
 +
 +    rswitch_scalar   = fr->rcoulomb_switch;
 +    rswitch          = _mm256_set1_pd(rswitch_scalar);
 +    /* Setup switch parameters */
 +    d_scalar         = rcutoff_scalar-rswitch_scalar;
 +    d                = _mm256_set1_pd(d_scalar);
 +    swV3             = _mm256_set1_pd(-10.0/(d_scalar*d_scalar*d_scalar));
 +    swV4             = _mm256_set1_pd( 15.0/(d_scalar*d_scalar*d_scalar*d_scalar));
 +    swV5             = _mm256_set1_pd( -6.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
 +    swF2             = _mm256_set1_pd(-30.0/(d_scalar*d_scalar*d_scalar));
 +    swF3             = _mm256_set1_pd( 60.0/(d_scalar*d_scalar*d_scalar*d_scalar));
 +    swF4             = _mm256_set1_pd(-30.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
 +
 +    /* Initialize j-indices/offsets only to silence uninitialized-variable compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_1rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +
 +        /* Load parameters for i particles */
 +        iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +        vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +        /* Start inner kernel loop: process 4 j-particles per iteration while all four entries are real (jjnr>=0) */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r00,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(rinv00,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            d                = _mm256_sub_pd(r00,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv00,_mm256_mul_pd(velec,dsw)) );
 +            fvdw             = _mm256_sub_pd( _mm256_mul_pd(fvdw,sw) , _mm256_mul_pd(rinv00,_mm256_mul_pd(vvdw,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
 +
 +            }
 +
 +            /* Inner loop uses 77 flops */
 +        }
 +
 +        /* Epilogue for the final partial group: the j-list is padded with negative (dummy) entries */
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +            /* Zero r for padded dummy entries so the Ewald table index below stays within bounds */
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r00,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(rinv00,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            d                = _mm256_sub_pd(r00,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv00,_mm256_mul_pd(velec,dsw)) );
 +            fvdw             = _mm256_sub_pd( _mm256_mul_pd(fvdw,sw) , _mm256_mul_pd(rinv00,_mm256_mul_pd(vvdw,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +            /* Clear forces for dummy (padded) j-entries */
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
 +
 +            }
 +
 +            /* Inner loop uses 78 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_1atom_swizzle_pd(fix0,fiy0,fiz0,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 7 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_F,outeriter*7 + inneriter*78);
 +}
index 13c115c3560230139846795ecb7d4ae3e0b1d159,0000000000000000000000000000000000000000..2ed0f22d52df796783077c8672817f80efb2171d
mode 100644,000000..100644
--- /dev/null
@@@ -1,1526 -1,0 +1,1526 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/*
 + * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2001-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the Free
 + * Software Foundation; either version 2 of the License, or (at your option) any
 + * later version.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website.
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecEwSw_VdwLJSw_GeomW3P1_VF_avx_256_double
 + * Electrostatics interaction: Ewald
 + * VdW interaction:            LennardJones
 + * Geometry:                   Water3-Particle
 + * Calculate force/pot:        PotentialAndForce
 + */
 +void
 +nb_kernel_ElecEwSw_VdwLJSw_GeomW3P1_VF_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m128i          ewitab;
 +    __m256d          ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
 +    __m256d          beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
 +    real             *ewtab;
 +    __m256d          rswitch,swV3,swV4,swV5,swF2,swF3,swF4,d,d2,sw,dsw;
 +    real             rswitch_scalar,d_scalar;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    sh_ewald         = _mm256_set1_pd(fr->ic->sh_ewald);
 +    beta             = _mm256_set1_pd(fr->ic->ewaldcoeff);
 +    beta2            = _mm256_mul_pd(beta,beta);
 +    beta3            = _mm256_mul_pd(beta,beta2);
 +
 +    ewtab            = fr->ic->tabq_coul_FDV0;
 +    ewtabscale       = _mm256_set1_pd(fr->ic->tabq_scale);
 +    ewtabhalfspace   = _mm256_set1_pd(0.5/fr->ic->tabq_scale);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +    /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
 +    rcutoff_scalar   = fr->rcoulomb;
 +    rcutoff          = _mm256_set1_pd(rcutoff_scalar);
 +    rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);
 +
 +    rswitch_scalar   = fr->rcoulomb_switch;
 +    rswitch          = _mm256_set1_pd(rswitch_scalar);
 +    /* Setup switch parameters */
 +    d_scalar         = rcutoff_scalar-rswitch_scalar;
 +    d                = _mm256_set1_pd(d_scalar);
 +    swV3             = _mm256_set1_pd(-10.0/(d_scalar*d_scalar*d_scalar));
 +    swV4             = _mm256_set1_pd( 15.0/(d_scalar*d_scalar*d_scalar*d_scalar));
 +    swV5             = _mm256_set1_pd( -6.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
 +    swF2             = _mm256_set1_pd(-30.0/(d_scalar*d_scalar*d_scalar));
 +    swF3             = _mm256_set1_pd( 60.0/(d_scalar*d_scalar*d_scalar*d_scalar));
 +    swF4             = _mm256_set1_pd(-30.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +
 +        /* Reset potential sums */
 +        velecsum         = _mm256_setzero_pd();
 +        vvdwsum          = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r00,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(rinv00,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            d                = _mm256_sub_pd(r00,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv00,_mm256_mul_pd(velec,dsw)) );
 +            fvdw             = _mm256_sub_pd( _mm256_mul_pd(fvdw,sw) , _mm256_mul_pd(rinv00,_mm256_mul_pd(vvdw,dsw)) );
 +            velec            = _mm256_mul_pd(velec,sw);
 +            vvdw             = _mm256_mul_pd(vvdw,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +            vvdw             = _mm256_and_pd(vvdw,cutoff_mask);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq10,rcutoff2))
 +            {
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r10,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(rinv10,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
 +
 +            d                = _mm256_sub_pd(r10,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv10,_mm256_mul_pd(velec,dsw)) );
 +            velec            = _mm256_mul_pd(velec,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq20,rcutoff2))
 +            {
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r20,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(rinv20,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
 +
 +            d                = _mm256_sub_pd(r20,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv20,_mm256_mul_pd(velec,dsw)) );
 +            velec            = _mm256_mul_pd(velec,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 216 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r00,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(rinv00,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            d                = _mm256_sub_pd(r00,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv00,_mm256_mul_pd(velec,dsw)) );
 +            fvdw             = _mm256_sub_pd( _mm256_mul_pd(fvdw,sw) , _mm256_mul_pd(rinv00,_mm256_mul_pd(vvdw,dsw)) );
 +            velec            = _mm256_mul_pd(velec,sw);
 +            vvdw             = _mm256_mul_pd(vvdw,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +            vvdw             = _mm256_and_pd(vvdw,cutoff_mask);
 +            vvdw             = _mm256_andnot_pd(dummy_mask,vvdw);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq10,rcutoff2))
 +            {
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +            r10              = _mm256_andnot_pd(dummy_mask,r10);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r10,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(rinv10,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
 +
 +            d                = _mm256_sub_pd(r10,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv10,_mm256_mul_pd(velec,dsw)) );
 +            velec            = _mm256_mul_pd(velec,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq20,rcutoff2))
 +            {
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +            r20              = _mm256_andnot_pd(dummy_mask,r20);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r20,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(rinv20,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
 +
 +            d                = _mm256_sub_pd(r20,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv20,_mm256_mul_pd(velec,dsw)) );
 +            velec            = _mm256_mul_pd(velec,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 219 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
 +        gmx_mm256_update_1pot_pd(vvdwsum,kernel_data->energygrp_vdw+ggid);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 20 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3_VF,outeriter*20 + inneriter*219);
 +}
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecEwSw_VdwLJSw_GeomW3P1_F_avx_256_double
 + * Electrostatics interaction: Ewald
 + * VdW interaction:            LennardJones
 + * Geometry:                   Water3-Particle
 + * Calculate force/pot:        Force
 + */
 +void
 +nb_kernel_ElecEwSw_VdwLJSw_GeomW3P1_F_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m128i          ewitab;
 +    __m256d          ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
 +    __m256d          beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
 +    real             *ewtab;
 +    __m256d          rswitch,swV3,swV4,swV5,swF2,swF3,swF4,d,d2,sw,dsw;
 +    real             rswitch_scalar,d_scalar;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    sh_ewald         = _mm256_set1_pd(fr->ic->sh_ewald);
 +    beta             = _mm256_set1_pd(fr->ic->ewaldcoeff);
 +    beta2            = _mm256_mul_pd(beta,beta);
 +    beta3            = _mm256_mul_pd(beta,beta2);
 +
 +    ewtab            = fr->ic->tabq_coul_FDV0;
 +    ewtabscale       = _mm256_set1_pd(fr->ic->tabq_scale);
 +    ewtabhalfspace   = _mm256_set1_pd(0.5/fr->ic->tabq_scale);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +    /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
 +    rcutoff_scalar   = fr->rcoulomb;
 +    rcutoff          = _mm256_set1_pd(rcutoff_scalar);
 +    rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);
 +
 +    rswitch_scalar   = fr->rcoulomb_switch;
 +    rswitch          = _mm256_set1_pd(rswitch_scalar);
 +    /* Setup switch parameters */
 +    d_scalar         = rcutoff_scalar-rswitch_scalar;
 +    d                = _mm256_set1_pd(d_scalar);
 +    swV3             = _mm256_set1_pd(-10.0/(d_scalar*d_scalar*d_scalar));
 +    swV4             = _mm256_set1_pd( 15.0/(d_scalar*d_scalar*d_scalar*d_scalar));
 +    swV5             = _mm256_set1_pd( -6.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
 +    swF2             = _mm256_set1_pd(-30.0/(d_scalar*d_scalar*d_scalar));
 +    swF3             = _mm256_set1_pd( 60.0/(d_scalar*d_scalar*d_scalar*d_scalar));
 +    swF4             = _mm256_set1_pd(-30.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r00,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(rinv00,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            d                = _mm256_sub_pd(r00,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv00,_mm256_mul_pd(velec,dsw)) );
 +            fvdw             = _mm256_sub_pd( _mm256_mul_pd(fvdw,sw) , _mm256_mul_pd(rinv00,_mm256_mul_pd(vvdw,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq10,rcutoff2))
 +            {
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r10,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(rinv10,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
 +
 +            d                = _mm256_sub_pd(r10,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv10,_mm256_mul_pd(velec,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq20,rcutoff2))
 +            {
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r20,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(rinv20,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
 +
 +            d                = _mm256_sub_pd(r20,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv20,_mm256_mul_pd(velec,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 204 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r00,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(rinv00,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            d                = _mm256_sub_pd(r00,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv00,_mm256_mul_pd(velec,dsw)) );
 +            fvdw             = _mm256_sub_pd( _mm256_mul_pd(fvdw,sw) , _mm256_mul_pd(rinv00,_mm256_mul_pd(vvdw,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq10,rcutoff2))
 +            {
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +            r10              = _mm256_andnot_pd(dummy_mask,r10);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r10,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(rinv10,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
 +
 +            d                = _mm256_sub_pd(r10,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv10,_mm256_mul_pd(velec,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq20,rcutoff2))
 +            {
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +            r20              = _mm256_andnot_pd(dummy_mask,r20);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r20,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(rinv20,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
 +
 +            d                = _mm256_sub_pd(r20,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv20,_mm256_mul_pd(velec,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 207 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 18 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3_F,outeriter*18 + inneriter*207);
 +}
index a62cac646a2eda67c03144e3d039730c7cf576fa,0000000000000000000000000000000000000000..7d0b8dc1278712f59be6bae3fa18b1d21dfcb9b9
mode 100644,000000..100644
--- /dev/null
@@@ -1,3156 -1,0 +1,3156 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/*
 + * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2001-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the Free
 + * Software Foundation; either version 2 of the License, or (at your option) any
 + * later version.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website.
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecEwSw_VdwLJSw_GeomW3W3_VF_avx_256_double
 + * Electrostatics interaction: Ewald
 + * VdW interaction:            LennardJones
 + * Geometry:                   Water3-Water3
 + * Calculate force/pot:        PotentialAndForce
 + */
 +void
 +nb_kernel_ElecEwSw_VdwLJSw_GeomW3W3_VF_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
 +    __m256d          jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
 +    int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
 +    __m256d          jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
 +    __m256d          dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
 +    __m256d          dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
 +    __m256d          dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m128i          ewitab;
 +    __m256d          ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
 +    __m256d          beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
 +    real             *ewtab;
 +    __m256d          rswitch,swV3,swV4,swV5,swF2,swF3,swF4,d,d2,sw,dsw;
 +    real             rswitch_scalar,d_scalar;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    sh_ewald         = _mm256_set1_pd(fr->ic->sh_ewald);
 +    beta             = _mm256_set1_pd(fr->ic->ewaldcoeff);
 +    beta2            = _mm256_mul_pd(beta,beta);
 +    beta3            = _mm256_mul_pd(beta,beta2);
 +
 +    ewtab            = fr->ic->tabq_coul_FDV0;
 +    ewtabscale       = _mm256_set1_pd(fr->ic->tabq_scale);
 +    ewtabhalfspace   = _mm256_set1_pd(0.5/fr->ic->tabq_scale);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +    jq0              = _mm256_set1_pd(charge[inr+0]);
 +    jq1              = _mm256_set1_pd(charge[inr+1]);
 +    jq2              = _mm256_set1_pd(charge[inr+2]);
 +    vdwjidx0A        = 2*vdwtype[inr+0];
 +    qq00             = _mm256_mul_pd(iq0,jq0);
 +    c6_00            = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A]);
 +    c12_00           = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A+1]);
 +    qq01             = _mm256_mul_pd(iq0,jq1);
 +    qq02             = _mm256_mul_pd(iq0,jq2);
 +    qq10             = _mm256_mul_pd(iq1,jq0);
 +    qq11             = _mm256_mul_pd(iq1,jq1);
 +    qq12             = _mm256_mul_pd(iq1,jq2);
 +    qq20             = _mm256_mul_pd(iq2,jq0);
 +    qq21             = _mm256_mul_pd(iq2,jq1);
 +    qq22             = _mm256_mul_pd(iq2,jq2);
 +
 +    /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
 +    rcutoff_scalar   = fr->rcoulomb;
 +    rcutoff          = _mm256_set1_pd(rcutoff_scalar);
 +    rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);
 +
 +    rswitch_scalar   = fr->rcoulomb_switch;
 +    rswitch          = _mm256_set1_pd(rswitch_scalar);
 +    /* Setup switch parameters */
 +    d_scalar         = rcutoff_scalar-rswitch_scalar;
 +    d                = _mm256_set1_pd(d_scalar);
 +    swV3             = _mm256_set1_pd(-10.0/(d_scalar*d_scalar*d_scalar));
 +    swV4             = _mm256_set1_pd( 15.0/(d_scalar*d_scalar*d_scalar*d_scalar));
 +    swV5             = _mm256_set1_pd( -6.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
 +    swF2             = _mm256_set1_pd(-30.0/(d_scalar*d_scalar*d_scalar));
 +    swF3             = _mm256_set1_pd( 60.0/(d_scalar*d_scalar*d_scalar*d_scalar));
 +    swF4             = _mm256_set1_pd(-30.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +
 +        /* Reset potential sums */
 +        velecsum         = _mm256_setzero_pd();
 +        vvdwsum          = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                              &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx01             = _mm256_sub_pd(ix0,jx1);
 +            dy01             = _mm256_sub_pd(iy0,jy1);
 +            dz01             = _mm256_sub_pd(iz0,jz1);
 +            dx02             = _mm256_sub_pd(ix0,jx2);
 +            dy02             = _mm256_sub_pd(iy0,jy2);
 +            dz02             = _mm256_sub_pd(iz0,jz2);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
 +            rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv01           = gmx_mm256_invsqrt_pd(rsq01);
 +            rinv02           = gmx_mm256_invsqrt_pd(rsq02);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq01         = _mm256_mul_pd(rinv01,rinv01);
 +            rinvsq02         = _mm256_mul_pd(rinv02,rinv02);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r00,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(rinv00,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            d                = _mm256_sub_pd(r00,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv00,_mm256_mul_pd(velec,dsw)) );
 +            fvdw             = _mm256_sub_pd( _mm256_mul_pd(fvdw,sw) , _mm256_mul_pd(rinv00,_mm256_mul_pd(vvdw,dsw)) );
 +            velec            = _mm256_mul_pd(velec,sw);
 +            vvdw             = _mm256_mul_pd(vvdw,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +            vvdw             = _mm256_and_pd(vvdw,cutoff_mask);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq01,rcutoff2))
 +            {
 +
 +            r01              = _mm256_mul_pd(rsq01,rinv01);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r01,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq01,_mm256_sub_pd(rinv01,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq01,rinv01),_mm256_sub_pd(rinvsq01,felec));
 +
 +            d                = _mm256_sub_pd(r01,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv01,_mm256_mul_pd(velec,dsw)) );
 +            velec            = _mm256_mul_pd(velec,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq01,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx01);
 +            ty               = _mm256_mul_pd(fscal,dy01);
 +            tz               = _mm256_mul_pd(fscal,dz01);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq02,rcutoff2))
 +            {
 +
 +            r02              = _mm256_mul_pd(rsq02,rinv02);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r02,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq02,_mm256_sub_pd(rinv02,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq02,rinv02),_mm256_sub_pd(rinvsq02,felec));
 +
 +            d                = _mm256_sub_pd(r02,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv02,_mm256_mul_pd(velec,dsw)) );
 +            velec            = _mm256_mul_pd(velec,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq02,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx02);
 +            ty               = _mm256_mul_pd(fscal,dy02);
 +            tz               = _mm256_mul_pd(fscal,dz02);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq10,rcutoff2))
 +            {
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r10,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(rinv10,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
 +
 +            d                = _mm256_sub_pd(r10,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv10,_mm256_mul_pd(velec,dsw)) );
 +            velec            = _mm256_mul_pd(velec,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq11,rcutoff2))
 +            {
 +
 +            r11              = _mm256_mul_pd(rsq11,rinv11);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r11,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq11,_mm256_sub_pd(rinv11,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq11,rinv11),_mm256_sub_pd(rinvsq11,felec));
 +
 +            d                = _mm256_sub_pd(r11,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv11,_mm256_mul_pd(velec,dsw)) );
 +            velec            = _mm256_mul_pd(velec,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq12,rcutoff2))
 +            {
 +
 +            r12              = _mm256_mul_pd(rsq12,rinv12);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r12,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq12,_mm256_sub_pd(rinv12,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq12,rinv12),_mm256_sub_pd(rinvsq12,felec));
 +
 +            d                = _mm256_sub_pd(r12,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv12,_mm256_mul_pd(velec,dsw)) );
 +            velec            = _mm256_mul_pd(velec,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq20,rcutoff2))
 +            {
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r20,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(rinv20,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
 +
 +            d                = _mm256_sub_pd(r20,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv20,_mm256_mul_pd(velec,dsw)) );
 +            velec            = _mm256_mul_pd(velec,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq21,rcutoff2))
 +            {
 +
 +            r21              = _mm256_mul_pd(rsq21,rinv21);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r21,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq21,_mm256_sub_pd(rinv21,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq21,rinv21),_mm256_sub_pd(rinvsq21,felec));
 +
 +            d                = _mm256_sub_pd(r21,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv21,_mm256_mul_pd(velec,dsw)) );
 +            velec            = _mm256_mul_pd(velec,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq22,rcutoff2))
 +            {
 +
 +            r22              = _mm256_mul_pd(rsq22,rinv22);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r22,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq22,_mm256_sub_pd(rinv22,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq22,rinv22),_mm256_sub_pd(rinvsq22,felec));
 +
 +            d                = _mm256_sub_pd(r22,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv22,_mm256_mul_pd(velec,dsw)) );
 +            velec            = _mm256_mul_pd(velec,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
 +
 +            /* Inner loop uses 603 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                              &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx01             = _mm256_sub_pd(ix0,jx1);
 +            dy01             = _mm256_sub_pd(iy0,jy1);
 +            dz01             = _mm256_sub_pd(iz0,jz1);
 +            dx02             = _mm256_sub_pd(ix0,jx2);
 +            dy02             = _mm256_sub_pd(iy0,jy2);
 +            dz02             = _mm256_sub_pd(iz0,jz2);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
 +            rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv01           = gmx_mm256_invsqrt_pd(rsq01);
 +            rinv02           = gmx_mm256_invsqrt_pd(rsq02);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq01         = _mm256_mul_pd(rinv01,rinv01);
 +            rinvsq02         = _mm256_mul_pd(rinv02,rinv02);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r00,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(rinv00,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            d                = _mm256_sub_pd(r00,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv00,_mm256_mul_pd(velec,dsw)) );
 +            fvdw             = _mm256_sub_pd( _mm256_mul_pd(fvdw,sw) , _mm256_mul_pd(rinv00,_mm256_mul_pd(vvdw,dsw)) );
 +            velec            = _mm256_mul_pd(velec,sw);
 +            vvdw             = _mm256_mul_pd(vvdw,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +            vvdw             = _mm256_and_pd(vvdw,cutoff_mask);
 +            vvdw             = _mm256_andnot_pd(dummy_mask,vvdw);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq01,rcutoff2))
 +            {
 +
 +            r01              = _mm256_mul_pd(rsq01,rinv01);
 +            r01              = _mm256_andnot_pd(dummy_mask,r01);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r01,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq01,_mm256_sub_pd(rinv01,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq01,rinv01),_mm256_sub_pd(rinvsq01,felec));
 +
 +            d                = _mm256_sub_pd(r01,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv01,_mm256_mul_pd(velec,dsw)) );
 +            velec            = _mm256_mul_pd(velec,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq01,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx01);
 +            ty               = _mm256_mul_pd(fscal,dy01);
 +            tz               = _mm256_mul_pd(fscal,dz01);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq02,rcutoff2))
 +            {
 +
 +            r02              = _mm256_mul_pd(rsq02,rinv02);
 +            r02              = _mm256_andnot_pd(dummy_mask,r02);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r02,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq02,_mm256_sub_pd(rinv02,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq02,rinv02),_mm256_sub_pd(rinvsq02,felec));
 +
 +            d                = _mm256_sub_pd(r02,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv02,_mm256_mul_pd(velec,dsw)) );
 +            velec            = _mm256_mul_pd(velec,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq02,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx02);
 +            ty               = _mm256_mul_pd(fscal,dy02);
 +            tz               = _mm256_mul_pd(fscal,dz02);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq10,rcutoff2))
 +            {
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +            r10              = _mm256_andnot_pd(dummy_mask,r10);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r10,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(rinv10,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
 +
 +            d                = _mm256_sub_pd(r10,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv10,_mm256_mul_pd(velec,dsw)) );
 +            velec            = _mm256_mul_pd(velec,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq11,rcutoff2))
 +            {
 +
 +            r11              = _mm256_mul_pd(rsq11,rinv11);
 +            r11              = _mm256_andnot_pd(dummy_mask,r11);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r11,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq11,_mm256_sub_pd(rinv11,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq11,rinv11),_mm256_sub_pd(rinvsq11,felec));
 +
 +            d                = _mm256_sub_pd(r11,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv11,_mm256_mul_pd(velec,dsw)) );
 +            velec            = _mm256_mul_pd(velec,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq12,rcutoff2))
 +            {
 +
 +            r12              = _mm256_mul_pd(rsq12,rinv12);
 +            r12              = _mm256_andnot_pd(dummy_mask,r12);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r12,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq12,_mm256_sub_pd(rinv12,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq12,rinv12),_mm256_sub_pd(rinvsq12,felec));
 +
 +            d                = _mm256_sub_pd(r12,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv12,_mm256_mul_pd(velec,dsw)) );
 +            velec            = _mm256_mul_pd(velec,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq20,rcutoff2))
 +            {
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +            r20              = _mm256_andnot_pd(dummy_mask,r20);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r20,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(rinv20,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
 +
 +            d                = _mm256_sub_pd(r20,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv20,_mm256_mul_pd(velec,dsw)) );
 +            velec            = _mm256_mul_pd(velec,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq21,rcutoff2))
 +            {
 +
 +            r21              = _mm256_mul_pd(rsq21,rinv21);
 +            r21              = _mm256_andnot_pd(dummy_mask,r21);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r21,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq21,_mm256_sub_pd(rinv21,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq21,rinv21),_mm256_sub_pd(rinvsq21,felec));
 +
 +            d                = _mm256_sub_pd(r21,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv21,_mm256_mul_pd(velec,dsw)) );
 +            velec            = _mm256_mul_pd(velec,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq22,rcutoff2))
 +            {
 +
 +            r22              = _mm256_mul_pd(rsq22,rinv22);
 +            r22              = _mm256_andnot_pd(dummy_mask,r22);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r22,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq22,_mm256_sub_pd(rinv22,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq22,rinv22),_mm256_sub_pd(rinvsq22,felec));
 +
 +            d                = _mm256_sub_pd(r22,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv22,_mm256_mul_pd(velec,dsw)) );
 +            velec            = _mm256_mul_pd(velec,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
 +
 +            /* Inner loop uses 612 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
 +        gmx_mm256_update_1pot_pd(vvdwsum,kernel_data->energygrp_vdw+ggid);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 20 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3W3_VF,outeriter*20 + inneriter*612);
 +}
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecEwSw_VdwLJSw_GeomW3W3_F_avx_256_double
 + * Electrostatics interaction: Ewald
 + * VdW interaction:            LennardJones
 + * Geometry:                   Water3-Water3
 + * Calculate force/pot:        Force
 + */
 +void
 +nb_kernel_ElecEwSw_VdwLJSw_GeomW3W3_F_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
 +    __m256d          jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
 +    int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
 +    __m256d          jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
 +    __m256d          dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
 +    __m256d          dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
 +    __m256d          dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m128i          ewitab;
 +    __m256d          ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
 +    __m256d          beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
 +    real             *ewtab;
 +    __m256d          rswitch,swV3,swV4,swV5,swF2,swF3,swF4,d,d2,sw,dsw;
 +    real             rswitch_scalar,d_scalar;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    sh_ewald         = _mm256_set1_pd(fr->ic->sh_ewald);
 +    beta             = _mm256_set1_pd(fr->ic->ewaldcoeff);
 +    beta2            = _mm256_mul_pd(beta,beta);
 +    beta3            = _mm256_mul_pd(beta,beta2);
 +
 +    ewtab            = fr->ic->tabq_coul_FDV0;
 +    ewtabscale       = _mm256_set1_pd(fr->ic->tabq_scale);
 +    ewtabhalfspace   = _mm256_set1_pd(0.5/fr->ic->tabq_scale);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +    jq0              = _mm256_set1_pd(charge[inr+0]);
 +    jq1              = _mm256_set1_pd(charge[inr+1]);
 +    jq2              = _mm256_set1_pd(charge[inr+2]);
 +    vdwjidx0A        = 2*vdwtype[inr+0];
 +    qq00             = _mm256_mul_pd(iq0,jq0);
 +    c6_00            = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A]);
 +    c12_00           = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A+1]);
 +    qq01             = _mm256_mul_pd(iq0,jq1);
 +    qq02             = _mm256_mul_pd(iq0,jq2);
 +    qq10             = _mm256_mul_pd(iq1,jq0);
 +    qq11             = _mm256_mul_pd(iq1,jq1);
 +    qq12             = _mm256_mul_pd(iq1,jq2);
 +    qq20             = _mm256_mul_pd(iq2,jq0);
 +    qq21             = _mm256_mul_pd(iq2,jq1);
 +    qq22             = _mm256_mul_pd(iq2,jq2);
 +
 +    /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
 +    rcutoff_scalar   = fr->rcoulomb;
 +    rcutoff          = _mm256_set1_pd(rcutoff_scalar);
 +    rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);
 +
 +    rswitch_scalar   = fr->rcoulomb_switch;
 +    rswitch          = _mm256_set1_pd(rswitch_scalar);
 +    /* Setup switch parameters */
 +    d_scalar         = rcutoff_scalar-rswitch_scalar;
 +    d                = _mm256_set1_pd(d_scalar);
 +    swV3             = _mm256_set1_pd(-10.0/(d_scalar*d_scalar*d_scalar));
 +    swV4             = _mm256_set1_pd( 15.0/(d_scalar*d_scalar*d_scalar*d_scalar));
 +    swV5             = _mm256_set1_pd( -6.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
 +    swF2             = _mm256_set1_pd(-30.0/(d_scalar*d_scalar*d_scalar));
 +    swF3             = _mm256_set1_pd( 60.0/(d_scalar*d_scalar*d_scalar*d_scalar));
 +    swF4             = _mm256_set1_pd(-30.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                              &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx01             = _mm256_sub_pd(ix0,jx1);
 +            dy01             = _mm256_sub_pd(iy0,jy1);
 +            dz01             = _mm256_sub_pd(iz0,jz1);
 +            dx02             = _mm256_sub_pd(ix0,jx2);
 +            dy02             = _mm256_sub_pd(iy0,jy2);
 +            dz02             = _mm256_sub_pd(iz0,jz2);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
 +            rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv01           = gmx_mm256_invsqrt_pd(rsq01);
 +            rinv02           = gmx_mm256_invsqrt_pd(rsq02);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq01         = _mm256_mul_pd(rinv01,rinv01);
 +            rinvsq02         = _mm256_mul_pd(rinv02,rinv02);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r00,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(rinv00,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            d                = _mm256_sub_pd(r00,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv00,_mm256_mul_pd(velec,dsw)) );
 +            fvdw             = _mm256_sub_pd( _mm256_mul_pd(fvdw,sw) , _mm256_mul_pd(rinv00,_mm256_mul_pd(vvdw,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq01,rcutoff2))
 +            {
 +
 +            r01              = _mm256_mul_pd(rsq01,rinv01);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r01,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq01,_mm256_sub_pd(rinv01,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq01,rinv01),_mm256_sub_pd(rinvsq01,felec));
 +
 +            d                = _mm256_sub_pd(r01,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv01,_mm256_mul_pd(velec,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq01,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx01);
 +            ty               = _mm256_mul_pd(fscal,dy01);
 +            tz               = _mm256_mul_pd(fscal,dz01);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq02,rcutoff2))
 +            {
 +
 +            r02              = _mm256_mul_pd(rsq02,rinv02);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r02,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq02,_mm256_sub_pd(rinv02,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq02,rinv02),_mm256_sub_pd(rinvsq02,felec));
 +
 +            d                = _mm256_sub_pd(r02,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv02,_mm256_mul_pd(velec,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq02,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx02);
 +            ty               = _mm256_mul_pd(fscal,dy02);
 +            tz               = _mm256_mul_pd(fscal,dz02);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq10,rcutoff2))
 +            {
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r10,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(rinv10,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
 +
 +            d                = _mm256_sub_pd(r10,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv10,_mm256_mul_pd(velec,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq11,rcutoff2))
 +            {
 +
 +            r11              = _mm256_mul_pd(rsq11,rinv11);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r11,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq11,_mm256_sub_pd(rinv11,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq11,rinv11),_mm256_sub_pd(rinvsq11,felec));
 +
 +            d                = _mm256_sub_pd(r11,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv11,_mm256_mul_pd(velec,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq12,rcutoff2))
 +            {
 +
 +            r12              = _mm256_mul_pd(rsq12,rinv12);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r12,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq12,_mm256_sub_pd(rinv12,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq12,rinv12),_mm256_sub_pd(rinvsq12,felec));
 +
 +            d                = _mm256_sub_pd(r12,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv12,_mm256_mul_pd(velec,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq20,rcutoff2))
 +            {
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r20,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(rinv20,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
 +
 +            d                = _mm256_sub_pd(r20,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv20,_mm256_mul_pd(velec,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq21,rcutoff2))
 +            {
 +
 +            r21              = _mm256_mul_pd(rsq21,rinv21);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r21,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq21,_mm256_sub_pd(rinv21,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq21,rinv21),_mm256_sub_pd(rinvsq21,felec));
 +
 +            d                = _mm256_sub_pd(r21,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv21,_mm256_mul_pd(velec,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq22,rcutoff2))
 +            {
 +
 +            r22              = _mm256_mul_pd(rsq22,rinv22);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r22,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq22,_mm256_sub_pd(rinv22,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq22,rinv22),_mm256_sub_pd(rinvsq22,felec));
 +
 +            d                = _mm256_sub_pd(r22,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv22,_mm256_mul_pd(velec,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
 +
 +            /* Inner loop uses 573 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                              &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx01             = _mm256_sub_pd(ix0,jx1);
 +            dy01             = _mm256_sub_pd(iy0,jy1);
 +            dz01             = _mm256_sub_pd(iz0,jz1);
 +            dx02             = _mm256_sub_pd(ix0,jx2);
 +            dy02             = _mm256_sub_pd(iy0,jy2);
 +            dz02             = _mm256_sub_pd(iz0,jz2);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
 +            rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv01           = gmx_mm256_invsqrt_pd(rsq01);
 +            rinv02           = gmx_mm256_invsqrt_pd(rsq02);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq01         = _mm256_mul_pd(rinv01,rinv01);
 +            rinvsq02         = _mm256_mul_pd(rinv02,rinv02);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r00,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(rinv00,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            d                = _mm256_sub_pd(r00,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv00,_mm256_mul_pd(velec,dsw)) );
 +            fvdw             = _mm256_sub_pd( _mm256_mul_pd(fvdw,sw) , _mm256_mul_pd(rinv00,_mm256_mul_pd(vvdw,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq01,rcutoff2))
 +            {
 +
 +            r01              = _mm256_mul_pd(rsq01,rinv01);
 +            r01              = _mm256_andnot_pd(dummy_mask,r01);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r01,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq01,_mm256_sub_pd(rinv01,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq01,rinv01),_mm256_sub_pd(rinvsq01,felec));
 +
 +            d                = _mm256_sub_pd(r01,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv01,_mm256_mul_pd(velec,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq01,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx01);
 +            ty               = _mm256_mul_pd(fscal,dy01);
 +            tz               = _mm256_mul_pd(fscal,dz01);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq02,rcutoff2))
 +            {
 +
 +            r02              = _mm256_mul_pd(rsq02,rinv02);
 +            r02              = _mm256_andnot_pd(dummy_mask,r02);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r02,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq02,_mm256_sub_pd(rinv02,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq02,rinv02),_mm256_sub_pd(rinvsq02,felec));
 +
 +            d                = _mm256_sub_pd(r02,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv02,_mm256_mul_pd(velec,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq02,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx02);
 +            ty               = _mm256_mul_pd(fscal,dy02);
 +            tz               = _mm256_mul_pd(fscal,dz02);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq10,rcutoff2))
 +            {
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +            r10              = _mm256_andnot_pd(dummy_mask,r10);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r10,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(rinv10,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
 +
 +            d                = _mm256_sub_pd(r10,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv10,_mm256_mul_pd(velec,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq11,rcutoff2))
 +            {
 +
 +            r11              = _mm256_mul_pd(rsq11,rinv11);
 +            r11              = _mm256_andnot_pd(dummy_mask,r11);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r11,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq11,_mm256_sub_pd(rinv11,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq11,rinv11),_mm256_sub_pd(rinvsq11,felec));
 +
 +            d                = _mm256_sub_pd(r11,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv11,_mm256_mul_pd(velec,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq12,rcutoff2))
 +            {
 +
 +            r12              = _mm256_mul_pd(rsq12,rinv12);
 +            r12              = _mm256_andnot_pd(dummy_mask,r12);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r12,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq12,_mm256_sub_pd(rinv12,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq12,rinv12),_mm256_sub_pd(rinvsq12,felec));
 +
 +            d                = _mm256_sub_pd(r12,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv12,_mm256_mul_pd(velec,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq20,rcutoff2))
 +            {
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +            r20              = _mm256_andnot_pd(dummy_mask,r20);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r20,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(rinv20,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
 +
 +            d                = _mm256_sub_pd(r20,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv20,_mm256_mul_pd(velec,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq21,rcutoff2))
 +            {
 +
 +            r21              = _mm256_mul_pd(rsq21,rinv21);
 +            r21              = _mm256_andnot_pd(dummy_mask,r21);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r21,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq21,_mm256_sub_pd(rinv21,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq21,rinv21),_mm256_sub_pd(rinvsq21,felec));
 +
 +            d                = _mm256_sub_pd(r21,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv21,_mm256_mul_pd(velec,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq22,rcutoff2))
 +            {
 +
 +            r22              = _mm256_mul_pd(rsq22,rinv22);
 +            r22              = _mm256_andnot_pd(dummy_mask,r22);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r22,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq22,_mm256_sub_pd(rinv22,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq22,rinv22),_mm256_sub_pd(rinvsq22,felec));
 +
 +            d                = _mm256_sub_pd(r22,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv22,_mm256_mul_pd(velec,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
 +
 +            /* Inner loop uses 582 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 18 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3W3_F,outeriter*18 + inneriter*582);
 +}
index 0d4b027b8a6e25a548aae1791860ea4947de2153,0000000000000000000000000000000000000000..fce1559cc0c7deb4750dd037c76a483e2adf8f06
mode 100644,000000..100644
--- /dev/null
@@@ -1,1740 -1,0 +1,1740 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/*
 + * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2001-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the Free
 + * Software Foundation; either version 2 of the License, or (at your option) any
 + * later version.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website.
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecEwSw_VdwLJSw_GeomW4P1_VF_avx_256_double
 + * Electrostatics interaction: Ewald
 + * VdW interaction:            LennardJones
 + * Geometry:                   Water4-Particle
 + * Calculate force/pot:        PotentialAndForce
 + */
 +void
 +nb_kernel_ElecEwSw_VdwLJSw_GeomW4P1_VF_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    real *           vdwioffsetptr3;
 +    __m256d          ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          dx30,dy30,dz30,rsq30,rinv30,rinvsq30,r30,qq30,c6_30,c12_30;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m128i          ewitab;
 +    __m256d          ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
 +    __m256d          beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
 +    real             *ewtab;
 +    __m256d          rswitch,swV3,swV4,swV5,swF2,swF3,swF4,d,d2,sw,dsw;
 +    real             rswitch_scalar,d_scalar;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    sh_ewald         = _mm256_set1_pd(fr->ic->sh_ewald);
 +    beta             = _mm256_set1_pd(fr->ic->ewaldcoeff);
 +    beta2            = _mm256_mul_pd(beta,beta);
 +    beta3            = _mm256_mul_pd(beta,beta2);
 +
 +    ewtab            = fr->ic->tabq_coul_FDV0;
 +    ewtabscale       = _mm256_set1_pd(fr->ic->tabq_scale);
 +    ewtabhalfspace   = _mm256_set1_pd(0.5/fr->ic->tabq_scale);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    iq3              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+3]));
 +    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +    /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
 +    rcutoff_scalar   = fr->rcoulomb;
 +    rcutoff          = _mm256_set1_pd(rcutoff_scalar);
 +    rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);
 +
 +    rswitch_scalar   = fr->rcoulomb_switch;
 +    rswitch          = _mm256_set1_pd(rswitch_scalar);
 +    /* Setup switch parameters */
 +    d_scalar         = rcutoff_scalar-rswitch_scalar;
 +    d                = _mm256_set1_pd(d_scalar);
 +    swV3             = _mm256_set1_pd(-10.0/(d_scalar*d_scalar*d_scalar));
 +    swV4             = _mm256_set1_pd( 15.0/(d_scalar*d_scalar*d_scalar*d_scalar));
 +    swV5             = _mm256_set1_pd( -6.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
 +    swF2             = _mm256_set1_pd(-30.0/(d_scalar*d_scalar*d_scalar));
 +    swF3             = _mm256_set1_pd( 60.0/(d_scalar*d_scalar*d_scalar*d_scalar));
 +    swF4             = _mm256_set1_pd(-30.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_4rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +        fix3             = _mm256_setzero_pd();
 +        fiy3             = _mm256_setzero_pd();
 +        fiz3             = _mm256_setzero_pd();
 +
 +        /* Reset potential sums */
 +        velecsum         = _mm256_setzero_pd();
 +        vvdwsum          = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx30             = _mm256_sub_pd(ix3,jx0);
 +            dy30             = _mm256_sub_pd(iy3,jy0);
 +            dz30             = _mm256_sub_pd(iz3,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq30            = gmx_mm256_calc_rsq_pd(dx30,dy30,dz30);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv30           = gmx_mm256_invsqrt_pd(rsq30);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq30         = _mm256_mul_pd(rinv30,rinv30);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            d                = _mm256_sub_pd(r00,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            fvdw             = _mm256_sub_pd( _mm256_mul_pd(fvdw,sw) , _mm256_mul_pd(rinv00,_mm256_mul_pd(vvdw,dsw)) );
 +            vvdw             = _mm256_mul_pd(vvdw,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            vvdw             = _mm256_and_pd(vvdw,cutoff_mask);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = fvdw;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq10,rcutoff2))
 +            {
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r10,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(rinv10,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
 +
 +            d                = _mm256_sub_pd(r10,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv10,_mm256_mul_pd(velec,dsw)) );
 +            velec            = _mm256_mul_pd(velec,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq20,rcutoff2))
 +            {
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r20,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(rinv20,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
 +
 +            d                = _mm256_sub_pd(r20,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv20,_mm256_mul_pd(velec,dsw)) );
 +            velec            = _mm256_mul_pd(velec,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq30,rcutoff2))
 +            {
 +
 +            r30              = _mm256_mul_pd(rsq30,rinv30);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq30             = _mm256_mul_pd(iq3,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r30,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq30,_mm256_sub_pd(rinv30,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq30,rinv30),_mm256_sub_pd(rinvsq30,felec));
 +
 +            d                = _mm256_sub_pd(r30,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv30,_mm256_mul_pd(velec,dsw)) );
 +            velec            = _mm256_mul_pd(velec,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq30,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx30);
 +            ty               = _mm256_mul_pd(fscal,dy30);
 +            tz               = _mm256_mul_pd(fscal,dz30);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 257 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx30             = _mm256_sub_pd(ix3,jx0);
 +            dy30             = _mm256_sub_pd(iy3,jy0);
 +            dz30             = _mm256_sub_pd(iz3,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq30            = gmx_mm256_calc_rsq_pd(dx30,dy30,dz30);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv30           = gmx_mm256_invsqrt_pd(rsq30);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq30         = _mm256_mul_pd(rinv30,rinv30);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            d                = _mm256_sub_pd(r00,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            fvdw             = _mm256_sub_pd( _mm256_mul_pd(fvdw,sw) , _mm256_mul_pd(rinv00,_mm256_mul_pd(vvdw,dsw)) );
 +            vvdw             = _mm256_mul_pd(vvdw,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            vvdw             = _mm256_and_pd(vvdw,cutoff_mask);
 +            vvdw             = _mm256_andnot_pd(dummy_mask,vvdw);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = fvdw;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq10,rcutoff2))
 +            {
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +            r10              = _mm256_andnot_pd(dummy_mask,r10);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r10,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(rinv10,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
 +
 +            d                = _mm256_sub_pd(r10,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv10,_mm256_mul_pd(velec,dsw)) );
 +            velec            = _mm256_mul_pd(velec,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq20,rcutoff2))
 +            {
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +            r20              = _mm256_andnot_pd(dummy_mask,r20);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r20,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(rinv20,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
 +
 +            d                = _mm256_sub_pd(r20,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv20,_mm256_mul_pd(velec,dsw)) );
 +            velec            = _mm256_mul_pd(velec,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq30,rcutoff2))
 +            {
 +
 +            r30              = _mm256_mul_pd(rsq30,rinv30);
 +            r30              = _mm256_andnot_pd(dummy_mask,r30);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq30             = _mm256_mul_pd(iq3,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r30,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq30,_mm256_sub_pd(rinv30,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq30,rinv30),_mm256_sub_pd(rinvsq30,felec));
 +
 +            d                = _mm256_sub_pd(r30,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv30,_mm256_mul_pd(velec,dsw)) );
 +            velec            = _mm256_mul_pd(velec,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq30,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx30);
 +            ty               = _mm256_mul_pd(fscal,dy30);
 +            tz               = _mm256_mul_pd(fscal,dz30);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 261 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_4atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
 +        gmx_mm256_update_1pot_pd(vvdwsum,kernel_data->energygrp_vdw+ggid);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 26 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4_VF,outeriter*26 + inneriter*261);
 +}
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecEwSw_VdwLJSw_GeomW4P1_F_avx_256_double
 + * Electrostatics interaction: Ewald
 + * VdW interaction:            LennardJones
 + * Geometry:                   Water4-Particle
 + * Calculate force/pot:        Force
 + */
 +void
 +nb_kernel_ElecEwSw_VdwLJSw_GeomW4P1_F_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    real *           vdwioffsetptr3;
 +    __m256d          ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          dx30,dy30,dz30,rsq30,rinv30,rinvsq30,r30,qq30,c6_30,c12_30;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m128i          ewitab;
 +    __m256d          ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
 +    __m256d          beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
 +    real             *ewtab;
 +    __m256d          rswitch,swV3,swV4,swV5,swF2,swF3,swF4,d,d2,sw,dsw;
 +    real             rswitch_scalar,d_scalar;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    sh_ewald         = _mm256_set1_pd(fr->ic->sh_ewald);
 +    beta             = _mm256_set1_pd(fr->ic->ewaldcoeff);
 +    beta2            = _mm256_mul_pd(beta,beta);
 +    beta3            = _mm256_mul_pd(beta,beta2);
 +
 +    ewtab            = fr->ic->tabq_coul_FDV0;
 +    ewtabscale       = _mm256_set1_pd(fr->ic->tabq_scale);
 +    ewtabhalfspace   = _mm256_set1_pd(0.5/fr->ic->tabq_scale);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    iq3              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+3]));
 +    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +    /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
 +    rcutoff_scalar   = fr->rcoulomb;
 +    rcutoff          = _mm256_set1_pd(rcutoff_scalar);
 +    rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);
 +
 +    rswitch_scalar   = fr->rcoulomb_switch;
 +    rswitch          = _mm256_set1_pd(rswitch_scalar);
 +    /* Setup switch parameters */
 +    d_scalar         = rcutoff_scalar-rswitch_scalar;
 +    d                = _mm256_set1_pd(d_scalar);
 +    swV3             = _mm256_set1_pd(-10.0/(d_scalar*d_scalar*d_scalar));
 +    swV4             = _mm256_set1_pd( 15.0/(d_scalar*d_scalar*d_scalar*d_scalar));
 +    swV5             = _mm256_set1_pd( -6.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
 +    swF2             = _mm256_set1_pd(-30.0/(d_scalar*d_scalar*d_scalar));
 +    swF3             = _mm256_set1_pd( 60.0/(d_scalar*d_scalar*d_scalar*d_scalar));
 +    swF4             = _mm256_set1_pd(-30.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_4rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +        fix3             = _mm256_setzero_pd();
 +        fiy3             = _mm256_setzero_pd();
 +        fiz3             = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx30             = _mm256_sub_pd(ix3,jx0);
 +            dy30             = _mm256_sub_pd(iy3,jy0);
 +            dz30             = _mm256_sub_pd(iz3,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq30            = gmx_mm256_calc_rsq_pd(dx30,dy30,dz30);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv30           = gmx_mm256_invsqrt_pd(rsq30);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq30         = _mm256_mul_pd(rinv30,rinv30);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            d                = _mm256_sub_pd(r00,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            fvdw             = _mm256_sub_pd( _mm256_mul_pd(fvdw,sw) , _mm256_mul_pd(rinv00,_mm256_mul_pd(vvdw,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = fvdw;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq10,rcutoff2))
 +            {
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r10,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(rinv10,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
 +
 +            d                = _mm256_sub_pd(r10,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv10,_mm256_mul_pd(velec,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq20,rcutoff2))
 +            {
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r20,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(rinv20,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
 +
 +            d                = _mm256_sub_pd(r20,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv20,_mm256_mul_pd(velec,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq30,rcutoff2))
 +            {
 +
 +            r30              = _mm256_mul_pd(rsq30,rinv30);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq30             = _mm256_mul_pd(iq3,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r30,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq30,_mm256_sub_pd(rinv30,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq30,rinv30),_mm256_sub_pd(rinvsq30,felec));
 +
 +            d                = _mm256_sub_pd(r30,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv30,_mm256_mul_pd(velec,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq30,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx30);
 +            ty               = _mm256_mul_pd(fscal,dy30);
 +            tz               = _mm256_mul_pd(fscal,dz30);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 245 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx30             = _mm256_sub_pd(ix3,jx0);
 +            dy30             = _mm256_sub_pd(iy3,jy0);
 +            dz30             = _mm256_sub_pd(iz3,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq30            = gmx_mm256_calc_rsq_pd(dx30,dy30,dz30);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv30           = gmx_mm256_invsqrt_pd(rsq30);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq30         = _mm256_mul_pd(rinv30,rinv30);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            d                = _mm256_sub_pd(r00,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            fvdw             = _mm256_sub_pd( _mm256_mul_pd(fvdw,sw) , _mm256_mul_pd(rinv00,_mm256_mul_pd(vvdw,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = fvdw;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq10,rcutoff2))
 +            {
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +            r10              = _mm256_andnot_pd(dummy_mask,r10);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r10,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(rinv10,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
 +
 +            d                = _mm256_sub_pd(r10,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv10,_mm256_mul_pd(velec,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq20,rcutoff2))
 +            {
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +            r20              = _mm256_andnot_pd(dummy_mask,r20);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r20,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(rinv20,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
 +
 +            d                = _mm256_sub_pd(r20,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv20,_mm256_mul_pd(velec,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq30,rcutoff2))
 +            {
 +
 +            r30              = _mm256_mul_pd(rsq30,rinv30);
 +            r30              = _mm256_andnot_pd(dummy_mask,r30);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq30             = _mm256_mul_pd(iq3,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r30,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq30,_mm256_sub_pd(rinv30,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq30,rinv30),_mm256_sub_pd(rinvsq30,felec));
 +
 +            d                = _mm256_sub_pd(r30,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv30,_mm256_mul_pd(velec,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq30,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx30);
 +            ty               = _mm256_mul_pd(fscal,dy30);
 +            tz               = _mm256_mul_pd(fscal,dz30);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 249 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_4atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 24 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4_F,outeriter*24 + inneriter*249);
 +}
index 81a843dd58259751612b78934801d3d572ec9590,0000000000000000000000000000000000000000..c615fc1ee14f028ca7384e7f2a36179b2bb29ecf
mode 100644,000000..100644
--- /dev/null
@@@ -1,3386 -1,0 +1,3386 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/*
 + * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2001-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the Free
 + * Software Foundation; either version 2 of the License, or (at your option) any
 + * later version.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website.
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecEwSw_VdwLJSw_GeomW4W4_VF_avx_256_double
 + * Electrostatics interaction: Ewald
 + * VdW interaction:            LennardJones
 + * Geometry:                   Water4-Water4
 + * Calculate force/pot:        PotentialAndForce
 + */
 +void
 +nb_kernel_ElecEwSw_VdwLJSw_GeomW4W4_VF_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    real *           vdwioffsetptr3;
 +    __m256d          ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
 +    __m256d          jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
 +    int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
 +    __m256d          jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
 +    int              vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D;
 +    __m256d          jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
 +    __m256d          dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
 +    __m256d          dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
 +    __m256d          dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
 +    __m256d          dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
 +    __m256d          dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
 +    __m256d          dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
 +    __m256d          dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
 +    __m256d          dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m128i          ewitab;
 +    __m256d          ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
 +    __m256d          beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
 +    real             *ewtab;
 +    __m256d          rswitch,swV3,swV4,swV5,swF2,swF3,swF4,d,d2,sw,dsw;
 +    real             rswitch_scalar,d_scalar;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    sh_ewald         = _mm256_set1_pd(fr->ic->sh_ewald);
 +    beta             = _mm256_set1_pd(fr->ic->ewaldcoeff);
 +    beta2            = _mm256_mul_pd(beta,beta);
 +    beta3            = _mm256_mul_pd(beta,beta2);
 +
 +    ewtab            = fr->ic->tabq_coul_FDV0;
 +    ewtabscale       = _mm256_set1_pd(fr->ic->tabq_scale);
 +    ewtabhalfspace   = _mm256_set1_pd(0.5/fr->ic->tabq_scale);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    iq3              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+3]));
 +    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +    jq1              = _mm256_set1_pd(charge[inr+1]);
 +    jq2              = _mm256_set1_pd(charge[inr+2]);
 +    jq3              = _mm256_set1_pd(charge[inr+3]);
 +    vdwjidx0A        = 2*vdwtype[inr+0];
 +    c6_00            = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A]);
 +    c12_00           = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A+1]);
 +    qq11             = _mm256_mul_pd(iq1,jq1);
 +    qq12             = _mm256_mul_pd(iq1,jq2);
 +    qq13             = _mm256_mul_pd(iq1,jq3);
 +    qq21             = _mm256_mul_pd(iq2,jq1);
 +    qq22             = _mm256_mul_pd(iq2,jq2);
 +    qq23             = _mm256_mul_pd(iq2,jq3);
 +    qq31             = _mm256_mul_pd(iq3,jq1);
 +    qq32             = _mm256_mul_pd(iq3,jq2);
 +    qq33             = _mm256_mul_pd(iq3,jq3);
 +
 +    /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
 +    rcutoff_scalar   = fr->rcoulomb;
 +    rcutoff          = _mm256_set1_pd(rcutoff_scalar);
 +    rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);
 +
 +    rswitch_scalar   = fr->rcoulomb_switch;
 +    rswitch          = _mm256_set1_pd(rswitch_scalar);
 +    /* Setup switch parameters */
 +    d_scalar         = rcutoff_scalar-rswitch_scalar;
 +    d                = _mm256_set1_pd(d_scalar);
 +    swV3             = _mm256_set1_pd(-10.0/(d_scalar*d_scalar*d_scalar));
 +    swV4             = _mm256_set1_pd( 15.0/(d_scalar*d_scalar*d_scalar*d_scalar));
 +    swV5             = _mm256_set1_pd( -6.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
 +    swF2             = _mm256_set1_pd(-30.0/(d_scalar*d_scalar*d_scalar));
 +    swF3             = _mm256_set1_pd( 60.0/(d_scalar*d_scalar*d_scalar*d_scalar));
 +    swF4             = _mm256_set1_pd(-30.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_4rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +        fix3             = _mm256_setzero_pd();
 +        fiy3             = _mm256_setzero_pd();
 +        fiz3             = _mm256_setzero_pd();
 +
 +        /* Reset potential sums */
 +        velecsum         = _mm256_setzero_pd();
 +        vvdwsum          = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_4rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
 +                                                 &jy2,&jz2,&jx3,&jy3,&jz3);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx13             = _mm256_sub_pd(ix1,jx3);
 +            dy13             = _mm256_sub_pd(iy1,jy3);
 +            dz13             = _mm256_sub_pd(iz1,jz3);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +            dx23             = _mm256_sub_pd(ix2,jx3);
 +            dy23             = _mm256_sub_pd(iy2,jy3);
 +            dz23             = _mm256_sub_pd(iz2,jz3);
 +            dx31             = _mm256_sub_pd(ix3,jx1);
 +            dy31             = _mm256_sub_pd(iy3,jy1);
 +            dz31             = _mm256_sub_pd(iz3,jz1);
 +            dx32             = _mm256_sub_pd(ix3,jx2);
 +            dy32             = _mm256_sub_pd(iy3,jy2);
 +            dz32             = _mm256_sub_pd(iz3,jz2);
 +            dx33             = _mm256_sub_pd(ix3,jx3);
 +            dy33             = _mm256_sub_pd(iy3,jy3);
 +            dz33             = _mm256_sub_pd(iz3,jz3);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq13            = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +            rsq23            = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
 +            rsq31            = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
 +            rsq32            = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
 +            rsq33            = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv13           = gmx_mm256_invsqrt_pd(rsq13);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +            rinv23           = gmx_mm256_invsqrt_pd(rsq23);
 +            rinv31           = gmx_mm256_invsqrt_pd(rsq31);
 +            rinv32           = gmx_mm256_invsqrt_pd(rsq32);
 +            rinv33           = gmx_mm256_invsqrt_pd(rsq33);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq13         = _mm256_mul_pd(rinv13,rinv13);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +            rinvsq23         = _mm256_mul_pd(rinv23,rinv23);
 +            rinvsq31         = _mm256_mul_pd(rinv31,rinv31);
 +            rinvsq32         = _mm256_mul_pd(rinv32,rinv32);
 +            rinvsq33         = _mm256_mul_pd(rinv33,rinv33);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +            fjx3             = _mm256_setzero_pd();
 +            fjy3             = _mm256_setzero_pd();
 +            fjz3             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            d                = _mm256_sub_pd(r00,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            fvdw             = _mm256_sub_pd( _mm256_mul_pd(fvdw,sw) , _mm256_mul_pd(rinv00,_mm256_mul_pd(vvdw,dsw)) );
 +            vvdw             = _mm256_mul_pd(vvdw,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            vvdw             = _mm256_and_pd(vvdw,cutoff_mask);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = fvdw;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq11,rcutoff2))
 +            {
 +
 +            r11              = _mm256_mul_pd(rsq11,rinv11);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r11,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq11,_mm256_sub_pd(rinv11,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq11,rinv11),_mm256_sub_pd(rinvsq11,felec));
 +
 +            d                = _mm256_sub_pd(r11,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv11,_mm256_mul_pd(velec,dsw)) );
 +            velec            = _mm256_mul_pd(velec,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq12,rcutoff2))
 +            {
 +
 +            r12              = _mm256_mul_pd(rsq12,rinv12);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r12,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq12,_mm256_sub_pd(rinv12,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq12,rinv12),_mm256_sub_pd(rinvsq12,felec));
 +
 +            d                = _mm256_sub_pd(r12,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv12,_mm256_mul_pd(velec,dsw)) );
 +            velec            = _mm256_mul_pd(velec,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq13,rcutoff2))
 +            {
 +
 +            r13              = _mm256_mul_pd(rsq13,rinv13);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r13,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq13,_mm256_sub_pd(rinv13,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq13,rinv13),_mm256_sub_pd(rinvsq13,felec));
 +
 +            d                = _mm256_sub_pd(r13,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv13,_mm256_mul_pd(velec,dsw)) );
 +            velec            = _mm256_mul_pd(velec,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq13,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx13);
 +            ty               = _mm256_mul_pd(fscal,dy13);
 +            tz               = _mm256_mul_pd(fscal,dz13);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq21,rcutoff2))
 +            {
 +
 +            r21              = _mm256_mul_pd(rsq21,rinv21);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r21,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq21,_mm256_sub_pd(rinv21,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq21,rinv21),_mm256_sub_pd(rinvsq21,felec));
 +
 +            d                = _mm256_sub_pd(r21,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv21,_mm256_mul_pd(velec,dsw)) );
 +            velec            = _mm256_mul_pd(velec,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq22,rcutoff2))
 +            {
 +
 +            r22              = _mm256_mul_pd(rsq22,rinv22);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r22,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq22,_mm256_sub_pd(rinv22,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq22,rinv22),_mm256_sub_pd(rinvsq22,felec));
 +
 +            d                = _mm256_sub_pd(r22,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv22,_mm256_mul_pd(velec,dsw)) );
 +            velec            = _mm256_mul_pd(velec,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq23,rcutoff2))
 +            {
 +
 +            r23              = _mm256_mul_pd(rsq23,rinv23);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r23,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq23,_mm256_sub_pd(rinv23,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq23,rinv23),_mm256_sub_pd(rinvsq23,felec));
 +
 +            d                = _mm256_sub_pd(r23,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv23,_mm256_mul_pd(velec,dsw)) );
 +            velec            = _mm256_mul_pd(velec,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq23,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx23);
 +            ty               = _mm256_mul_pd(fscal,dy23);
 +            tz               = _mm256_mul_pd(fscal,dz23);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq31,rcutoff2))
 +            {
 +
 +            r31              = _mm256_mul_pd(rsq31,rinv31);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r31,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq31,_mm256_sub_pd(rinv31,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq31,rinv31),_mm256_sub_pd(rinvsq31,felec));
 +
 +            d                = _mm256_sub_pd(r31,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv31,_mm256_mul_pd(velec,dsw)) );
 +            velec            = _mm256_mul_pd(velec,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq31,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx31);
 +            ty               = _mm256_mul_pd(fscal,dy31);
 +            tz               = _mm256_mul_pd(fscal,dz31);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq32,rcutoff2))
 +            {
 +
 +            r32              = _mm256_mul_pd(rsq32,rinv32);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r32,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq32,_mm256_sub_pd(rinv32,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq32,rinv32),_mm256_sub_pd(rinvsq32,felec));
 +
 +            d                = _mm256_sub_pd(r32,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv32,_mm256_mul_pd(velec,dsw)) );
 +            velec            = _mm256_mul_pd(velec,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq32,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx32);
 +            ty               = _mm256_mul_pd(fscal,dy32);
 +            tz               = _mm256_mul_pd(fscal,dz32);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq33,rcutoff2))
 +            {
 +
 +            r33              = _mm256_mul_pd(rsq33,rinv33);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r33,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq33,_mm256_sub_pd(rinv33,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq33,rinv33),_mm256_sub_pd(rinvsq33,felec));
 +
 +            d                = _mm256_sub_pd(r33,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv33,_mm256_mul_pd(velec,dsw)) );
 +            velec            = _mm256_mul_pd(velec,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq33,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx33);
 +            ty               = _mm256_mul_pd(fscal,dy33);
 +            tz               = _mm256_mul_pd(fscal,dz33);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            }
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_4rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
 +                                                      fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
 +
 +            /* Inner loop uses 647 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_4rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
 +                                                 &jy2,&jz2,&jx3,&jy3,&jz3);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx13             = _mm256_sub_pd(ix1,jx3);
 +            dy13             = _mm256_sub_pd(iy1,jy3);
 +            dz13             = _mm256_sub_pd(iz1,jz3);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +            dx23             = _mm256_sub_pd(ix2,jx3);
 +            dy23             = _mm256_sub_pd(iy2,jy3);
 +            dz23             = _mm256_sub_pd(iz2,jz3);
 +            dx31             = _mm256_sub_pd(ix3,jx1);
 +            dy31             = _mm256_sub_pd(iy3,jy1);
 +            dz31             = _mm256_sub_pd(iz3,jz1);
 +            dx32             = _mm256_sub_pd(ix3,jx2);
 +            dy32             = _mm256_sub_pd(iy3,jy2);
 +            dz32             = _mm256_sub_pd(iz3,jz2);
 +            dx33             = _mm256_sub_pd(ix3,jx3);
 +            dy33             = _mm256_sub_pd(iy3,jy3);
 +            dz33             = _mm256_sub_pd(iz3,jz3);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq13            = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +            rsq23            = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
 +            rsq31            = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
 +            rsq32            = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
 +            rsq33            = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv13           = gmx_mm256_invsqrt_pd(rsq13);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +            rinv23           = gmx_mm256_invsqrt_pd(rsq23);
 +            rinv31           = gmx_mm256_invsqrt_pd(rsq31);
 +            rinv32           = gmx_mm256_invsqrt_pd(rsq32);
 +            rinv33           = gmx_mm256_invsqrt_pd(rsq33);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq13         = _mm256_mul_pd(rinv13,rinv13);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +            rinvsq23         = _mm256_mul_pd(rinv23,rinv23);
 +            rinvsq31         = _mm256_mul_pd(rinv31,rinv31);
 +            rinvsq32         = _mm256_mul_pd(rinv32,rinv32);
 +            rinvsq33         = _mm256_mul_pd(rinv33,rinv33);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +            fjx3             = _mm256_setzero_pd();
 +            fjy3             = _mm256_setzero_pd();
 +            fjz3             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            d                = _mm256_sub_pd(r00,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            fvdw             = _mm256_sub_pd( _mm256_mul_pd(fvdw,sw) , _mm256_mul_pd(rinv00,_mm256_mul_pd(vvdw,dsw)) );
 +            vvdw             = _mm256_mul_pd(vvdw,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            vvdw             = _mm256_and_pd(vvdw,cutoff_mask);
 +            vvdw             = _mm256_andnot_pd(dummy_mask,vvdw);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = fvdw;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq11,rcutoff2))
 +            {
 +
 +            r11              = _mm256_mul_pd(rsq11,rinv11);
 +            r11              = _mm256_andnot_pd(dummy_mask,r11);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r11,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq11,_mm256_sub_pd(rinv11,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq11,rinv11),_mm256_sub_pd(rinvsq11,felec));
 +
 +            d                = _mm256_sub_pd(r11,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv11,_mm256_mul_pd(velec,dsw)) );
 +            velec            = _mm256_mul_pd(velec,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq12,rcutoff2))
 +            {
 +
 +            r12              = _mm256_mul_pd(rsq12,rinv12);
 +            r12              = _mm256_andnot_pd(dummy_mask,r12);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r12,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq12,_mm256_sub_pd(rinv12,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq12,rinv12),_mm256_sub_pd(rinvsq12,felec));
 +
 +            d                = _mm256_sub_pd(r12,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv12,_mm256_mul_pd(velec,dsw)) );
 +            velec            = _mm256_mul_pd(velec,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq13,rcutoff2))
 +            {
 +
 +            r13              = _mm256_mul_pd(rsq13,rinv13);
 +            r13              = _mm256_andnot_pd(dummy_mask,r13);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r13,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq13,_mm256_sub_pd(rinv13,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq13,rinv13),_mm256_sub_pd(rinvsq13,felec));
 +
 +            d                = _mm256_sub_pd(r13,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv13,_mm256_mul_pd(velec,dsw)) );
 +            velec            = _mm256_mul_pd(velec,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq13,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx13);
 +            ty               = _mm256_mul_pd(fscal,dy13);
 +            tz               = _mm256_mul_pd(fscal,dz13);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq21,rcutoff2))
 +            {
 +
 +            r21              = _mm256_mul_pd(rsq21,rinv21);
 +            r21              = _mm256_andnot_pd(dummy_mask,r21);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r21,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq21,_mm256_sub_pd(rinv21,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq21,rinv21),_mm256_sub_pd(rinvsq21,felec));
 +
 +            d                = _mm256_sub_pd(r21,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv21,_mm256_mul_pd(velec,dsw)) );
 +            velec            = _mm256_mul_pd(velec,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq22,rcutoff2))
 +            {
 +
 +            r22              = _mm256_mul_pd(rsq22,rinv22);
 +            r22              = _mm256_andnot_pd(dummy_mask,r22);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r22,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq22,_mm256_sub_pd(rinv22,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq22,rinv22),_mm256_sub_pd(rinvsq22,felec));
 +
 +            d                = _mm256_sub_pd(r22,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv22,_mm256_mul_pd(velec,dsw)) );
 +            velec            = _mm256_mul_pd(velec,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq23,rcutoff2))
 +            {
 +
 +            r23              = _mm256_mul_pd(rsq23,rinv23);
 +            r23              = _mm256_andnot_pd(dummy_mask,r23);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r23,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq23,_mm256_sub_pd(rinv23,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq23,rinv23),_mm256_sub_pd(rinvsq23,felec));
 +
 +            d                = _mm256_sub_pd(r23,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv23,_mm256_mul_pd(velec,dsw)) );
 +            velec            = _mm256_mul_pd(velec,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq23,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx23);
 +            ty               = _mm256_mul_pd(fscal,dy23);
 +            tz               = _mm256_mul_pd(fscal,dz23);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq31,rcutoff2))
 +            {
 +
 +            r31              = _mm256_mul_pd(rsq31,rinv31);
 +            r31              = _mm256_andnot_pd(dummy_mask,r31);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r31,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq31,_mm256_sub_pd(rinv31,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq31,rinv31),_mm256_sub_pd(rinvsq31,felec));
 +
 +            d                = _mm256_sub_pd(r31,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv31,_mm256_mul_pd(velec,dsw)) );
 +            velec            = _mm256_mul_pd(velec,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq31,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx31);
 +            ty               = _mm256_mul_pd(fscal,dy31);
 +            tz               = _mm256_mul_pd(fscal,dz31);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq32,rcutoff2))
 +            {
 +
 +            r32              = _mm256_mul_pd(rsq32,rinv32);
 +            r32              = _mm256_andnot_pd(dummy_mask,r32);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r32,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq32,_mm256_sub_pd(rinv32,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq32,rinv32),_mm256_sub_pd(rinvsq32,felec));
 +
 +            d                = _mm256_sub_pd(r32,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv32,_mm256_mul_pd(velec,dsw)) );
 +            velec            = _mm256_mul_pd(velec,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq32,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx32);
 +            ty               = _mm256_mul_pd(fscal,dy32);
 +            tz               = _mm256_mul_pd(fscal,dz32);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq33,rcutoff2))
 +            {
 +
 +            r33              = _mm256_mul_pd(rsq33,rinv33);
 +            r33              = _mm256_andnot_pd(dummy_mask,r33);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r33,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq33,_mm256_sub_pd(rinv33,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq33,rinv33),_mm256_sub_pd(rinvsq33,felec));
 +
 +            d                = _mm256_sub_pd(r33,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv33,_mm256_mul_pd(velec,dsw)) );
 +            velec            = _mm256_mul_pd(velec,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq33,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx33);
 +            ty               = _mm256_mul_pd(fscal,dy33);
 +            tz               = _mm256_mul_pd(fscal,dz33);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            }
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_4rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
 +                                                      fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
 +
 +            /* Inner loop uses 657 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_4atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
 +        gmx_mm256_update_1pot_pd(vvdwsum,kernel_data->energygrp_vdw+ggid);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 26 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4W4_VF,outeriter*26 + inneriter*657);
 +}
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecEwSw_VdwLJSw_GeomW4W4_F_avx_256_double
 + * Electrostatics interaction: Ewald
 + * VdW interaction:            LennardJones
 + * Geometry:                   Water4-Water4
 + * Calculate force/pot:        Force
 + */
 +void
 +nb_kernel_ElecEwSw_VdwLJSw_GeomW4W4_F_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    real *           vdwioffsetptr3;
 +    __m256d          ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
 +    __m256d          jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
 +    int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
 +    __m256d          jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
 +    int              vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D;
 +    __m256d          jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
 +    __m256d          dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
 +    __m256d          dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
 +    __m256d          dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
 +    __m256d          dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
 +    __m256d          dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
 +    __m256d          dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
 +    __m256d          dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
 +    __m256d          dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m128i          ewitab;
 +    __m256d          ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
 +    __m256d          beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
 +    real             *ewtab;
 +    __m256d          rswitch,swV3,swV4,swV5,swF2,swF3,swF4,d,d2,sw,dsw;
 +    real             rswitch_scalar,d_scalar;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    sh_ewald         = _mm256_set1_pd(fr->ic->sh_ewald);
 +    beta             = _mm256_set1_pd(fr->ic->ewaldcoeff);
 +    beta2            = _mm256_mul_pd(beta,beta);
 +    beta3            = _mm256_mul_pd(beta,beta2);
 +
 +    ewtab            = fr->ic->tabq_coul_FDV0;
 +    ewtabscale       = _mm256_set1_pd(fr->ic->tabq_scale);
 +    ewtabhalfspace   = _mm256_set1_pd(0.5/fr->ic->tabq_scale);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    iq3              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+3]));
 +    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +    jq1              = _mm256_set1_pd(charge[inr+1]);
 +    jq2              = _mm256_set1_pd(charge[inr+2]);
 +    jq3              = _mm256_set1_pd(charge[inr+3]);
 +    vdwjidx0A        = 2*vdwtype[inr+0];
 +    c6_00            = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A]);
 +    c12_00           = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A+1]);
 +    qq11             = _mm256_mul_pd(iq1,jq1);
 +    qq12             = _mm256_mul_pd(iq1,jq2);
 +    qq13             = _mm256_mul_pd(iq1,jq3);
 +    qq21             = _mm256_mul_pd(iq2,jq1);
 +    qq22             = _mm256_mul_pd(iq2,jq2);
 +    qq23             = _mm256_mul_pd(iq2,jq3);
 +    qq31             = _mm256_mul_pd(iq3,jq1);
 +    qq32             = _mm256_mul_pd(iq3,jq2);
 +    qq33             = _mm256_mul_pd(iq3,jq3);
 +
 +    /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
 +    rcutoff_scalar   = fr->rcoulomb;
 +    rcutoff          = _mm256_set1_pd(rcutoff_scalar);
 +    rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);
 +
 +    rswitch_scalar   = fr->rcoulomb_switch;
 +    rswitch          = _mm256_set1_pd(rswitch_scalar);
 +    /* Setup switch parameters */
 +    d_scalar         = rcutoff_scalar-rswitch_scalar;
 +    d                = _mm256_set1_pd(d_scalar);
 +    swV3             = _mm256_set1_pd(-10.0/(d_scalar*d_scalar*d_scalar));
 +    swV4             = _mm256_set1_pd( 15.0/(d_scalar*d_scalar*d_scalar*d_scalar));
 +    swV5             = _mm256_set1_pd( -6.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
 +    swF2             = _mm256_set1_pd(-30.0/(d_scalar*d_scalar*d_scalar));
 +    swF3             = _mm256_set1_pd( 60.0/(d_scalar*d_scalar*d_scalar*d_scalar));
 +    swF4             = _mm256_set1_pd(-30.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_4rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +        fix3             = _mm256_setzero_pd();
 +        fiy3             = _mm256_setzero_pd();
 +        fiz3             = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_4rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
 +                                                 &jy2,&jz2,&jx3,&jy3,&jz3);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx13             = _mm256_sub_pd(ix1,jx3);
 +            dy13             = _mm256_sub_pd(iy1,jy3);
 +            dz13             = _mm256_sub_pd(iz1,jz3);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +            dx23             = _mm256_sub_pd(ix2,jx3);
 +            dy23             = _mm256_sub_pd(iy2,jy3);
 +            dz23             = _mm256_sub_pd(iz2,jz3);
 +            dx31             = _mm256_sub_pd(ix3,jx1);
 +            dy31             = _mm256_sub_pd(iy3,jy1);
 +            dz31             = _mm256_sub_pd(iz3,jz1);
 +            dx32             = _mm256_sub_pd(ix3,jx2);
 +            dy32             = _mm256_sub_pd(iy3,jy2);
 +            dz32             = _mm256_sub_pd(iz3,jz2);
 +            dx33             = _mm256_sub_pd(ix3,jx3);
 +            dy33             = _mm256_sub_pd(iy3,jy3);
 +            dz33             = _mm256_sub_pd(iz3,jz3);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq13            = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +            rsq23            = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
 +            rsq31            = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
 +            rsq32            = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
 +            rsq33            = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv13           = gmx_mm256_invsqrt_pd(rsq13);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +            rinv23           = gmx_mm256_invsqrt_pd(rsq23);
 +            rinv31           = gmx_mm256_invsqrt_pd(rsq31);
 +            rinv32           = gmx_mm256_invsqrt_pd(rsq32);
 +            rinv33           = gmx_mm256_invsqrt_pd(rsq33);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq13         = _mm256_mul_pd(rinv13,rinv13);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +            rinvsq23         = _mm256_mul_pd(rinv23,rinv23);
 +            rinvsq31         = _mm256_mul_pd(rinv31,rinv31);
 +            rinvsq32         = _mm256_mul_pd(rinv32,rinv32);
 +            rinvsq33         = _mm256_mul_pd(rinv33,rinv33);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +            fjx3             = _mm256_setzero_pd();
 +            fjy3             = _mm256_setzero_pd();
 +            fjz3             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            d                = _mm256_sub_pd(r00,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            fvdw             = _mm256_sub_pd( _mm256_mul_pd(fvdw,sw) , _mm256_mul_pd(rinv00,_mm256_mul_pd(vvdw,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = fvdw;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq11,rcutoff2))
 +            {
 +
 +            r11              = _mm256_mul_pd(rsq11,rinv11);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r11,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq11,_mm256_sub_pd(rinv11,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq11,rinv11),_mm256_sub_pd(rinvsq11,felec));
 +
 +            d                = _mm256_sub_pd(r11,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv11,_mm256_mul_pd(velec,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq12,rcutoff2))
 +            {
 +
 +            r12              = _mm256_mul_pd(rsq12,rinv12);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r12,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq12,_mm256_sub_pd(rinv12,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq12,rinv12),_mm256_sub_pd(rinvsq12,felec));
 +
 +            d                = _mm256_sub_pd(r12,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv12,_mm256_mul_pd(velec,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq13,rcutoff2))
 +            {
 +
 +            r13              = _mm256_mul_pd(rsq13,rinv13);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r13,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq13,_mm256_sub_pd(rinv13,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq13,rinv13),_mm256_sub_pd(rinvsq13,felec));
 +
 +            d                = _mm256_sub_pd(r13,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv13,_mm256_mul_pd(velec,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq13,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx13);
 +            ty               = _mm256_mul_pd(fscal,dy13);
 +            tz               = _mm256_mul_pd(fscal,dz13);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq21,rcutoff2))
 +            {
 +
 +            r21              = _mm256_mul_pd(rsq21,rinv21);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r21,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq21,_mm256_sub_pd(rinv21,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq21,rinv21),_mm256_sub_pd(rinvsq21,felec));
 +
 +            d                = _mm256_sub_pd(r21,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv21,_mm256_mul_pd(velec,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq22,rcutoff2))
 +            {
 +
 +            r22              = _mm256_mul_pd(rsq22,rinv22);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r22,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq22,_mm256_sub_pd(rinv22,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq22,rinv22),_mm256_sub_pd(rinvsq22,felec));
 +
 +            d                = _mm256_sub_pd(r22,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv22,_mm256_mul_pd(velec,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq23,rcutoff2))
 +            {
 +
 +            r23              = _mm256_mul_pd(rsq23,rinv23);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r23,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq23,_mm256_sub_pd(rinv23,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq23,rinv23),_mm256_sub_pd(rinvsq23,felec));
 +
 +            d                = _mm256_sub_pd(r23,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv23,_mm256_mul_pd(velec,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq23,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx23);
 +            ty               = _mm256_mul_pd(fscal,dy23);
 +            tz               = _mm256_mul_pd(fscal,dz23);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq31,rcutoff2))
 +            {
 +
 +            r31              = _mm256_mul_pd(rsq31,rinv31);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r31,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq31,_mm256_sub_pd(rinv31,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq31,rinv31),_mm256_sub_pd(rinvsq31,felec));
 +
 +            d                = _mm256_sub_pd(r31,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv31,_mm256_mul_pd(velec,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq31,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx31);
 +            ty               = _mm256_mul_pd(fscal,dy31);
 +            tz               = _mm256_mul_pd(fscal,dz31);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq32,rcutoff2))
 +            {
 +
 +            r32              = _mm256_mul_pd(rsq32,rinv32);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r32,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq32,_mm256_sub_pd(rinv32,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq32,rinv32),_mm256_sub_pd(rinvsq32,felec));
 +
 +            d                = _mm256_sub_pd(r32,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv32,_mm256_mul_pd(velec,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq32,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx32);
 +            ty               = _mm256_mul_pd(fscal,dy32);
 +            tz               = _mm256_mul_pd(fscal,dz32);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq33,rcutoff2))
 +            {
 +
 +            r33              = _mm256_mul_pd(rsq33,rinv33);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r33,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq33,_mm256_sub_pd(rinv33,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq33,rinv33),_mm256_sub_pd(rinvsq33,felec));
 +
 +            d                = _mm256_sub_pd(r33,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv33,_mm256_mul_pd(velec,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq33,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx33);
 +            ty               = _mm256_mul_pd(fscal,dy33);
 +            tz               = _mm256_mul_pd(fscal,dz33);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            }
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_4rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
 +                                                      fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
 +
 +            /* Inner loop uses 617 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_4rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
 +                                                 &jy2,&jz2,&jx3,&jy3,&jz3);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx13             = _mm256_sub_pd(ix1,jx3);
 +            dy13             = _mm256_sub_pd(iy1,jy3);
 +            dz13             = _mm256_sub_pd(iz1,jz3);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +            dx23             = _mm256_sub_pd(ix2,jx3);
 +            dy23             = _mm256_sub_pd(iy2,jy3);
 +            dz23             = _mm256_sub_pd(iz2,jz3);
 +            dx31             = _mm256_sub_pd(ix3,jx1);
 +            dy31             = _mm256_sub_pd(iy3,jy1);
 +            dz31             = _mm256_sub_pd(iz3,jz1);
 +            dx32             = _mm256_sub_pd(ix3,jx2);
 +            dy32             = _mm256_sub_pd(iy3,jy2);
 +            dz32             = _mm256_sub_pd(iz3,jz2);
 +            dx33             = _mm256_sub_pd(ix3,jx3);
 +            dy33             = _mm256_sub_pd(iy3,jy3);
 +            dz33             = _mm256_sub_pd(iz3,jz3);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq13            = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +            rsq23            = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
 +            rsq31            = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
 +            rsq32            = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
 +            rsq33            = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv13           = gmx_mm256_invsqrt_pd(rsq13);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +            rinv23           = gmx_mm256_invsqrt_pd(rsq23);
 +            rinv31           = gmx_mm256_invsqrt_pd(rsq31);
 +            rinv32           = gmx_mm256_invsqrt_pd(rsq32);
 +            rinv33           = gmx_mm256_invsqrt_pd(rsq33);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq13         = _mm256_mul_pd(rinv13,rinv13);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +            rinvsq23         = _mm256_mul_pd(rinv23,rinv23);
 +            rinvsq31         = _mm256_mul_pd(rinv31,rinv31);
 +            rinvsq32         = _mm256_mul_pd(rinv32,rinv32);
 +            rinvsq33         = _mm256_mul_pd(rinv33,rinv33);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +            fjx3             = _mm256_setzero_pd();
 +            fjy3             = _mm256_setzero_pd();
 +            fjz3             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            d                = _mm256_sub_pd(r00,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            fvdw             = _mm256_sub_pd( _mm256_mul_pd(fvdw,sw) , _mm256_mul_pd(rinv00,_mm256_mul_pd(vvdw,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = fvdw;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq11,rcutoff2))
 +            {
 +
 +            r11              = _mm256_mul_pd(rsq11,rinv11);
 +            r11              = _mm256_andnot_pd(dummy_mask,r11);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r11,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq11,_mm256_sub_pd(rinv11,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq11,rinv11),_mm256_sub_pd(rinvsq11,felec));
 +
 +            d                = _mm256_sub_pd(r11,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv11,_mm256_mul_pd(velec,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq12,rcutoff2))
 +            {
 +
 +            r12              = _mm256_mul_pd(rsq12,rinv12);
 +            r12              = _mm256_andnot_pd(dummy_mask,r12);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r12,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq12,_mm256_sub_pd(rinv12,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq12,rinv12),_mm256_sub_pd(rinvsq12,felec));
 +
 +            d                = _mm256_sub_pd(r12,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv12,_mm256_mul_pd(velec,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq13,rcutoff2))
 +            {
 +
 +            r13              = _mm256_mul_pd(rsq13,rinv13);
 +            r13              = _mm256_andnot_pd(dummy_mask,r13);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r13,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq13,_mm256_sub_pd(rinv13,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq13,rinv13),_mm256_sub_pd(rinvsq13,felec));
 +
 +            d                = _mm256_sub_pd(r13,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv13,_mm256_mul_pd(velec,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq13,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx13);
 +            ty               = _mm256_mul_pd(fscal,dy13);
 +            tz               = _mm256_mul_pd(fscal,dz13);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq21,rcutoff2))
 +            {
 +
 +            r21              = _mm256_mul_pd(rsq21,rinv21);
 +            r21              = _mm256_andnot_pd(dummy_mask,r21);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r21,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq21,_mm256_sub_pd(rinv21,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq21,rinv21),_mm256_sub_pd(rinvsq21,felec));
 +
 +            d                = _mm256_sub_pd(r21,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv21,_mm256_mul_pd(velec,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq22,rcutoff2))
 +            {
 +
 +            r22              = _mm256_mul_pd(rsq22,rinv22);
 +            r22              = _mm256_andnot_pd(dummy_mask,r22);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r22,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq22,_mm256_sub_pd(rinv22,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq22,rinv22),_mm256_sub_pd(rinvsq22,felec));
 +
 +            d                = _mm256_sub_pd(r22,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv22,_mm256_mul_pd(velec,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq23,rcutoff2))
 +            {
 +
 +            r23              = _mm256_mul_pd(rsq23,rinv23);
 +            r23              = _mm256_andnot_pd(dummy_mask,r23);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r23,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq23,_mm256_sub_pd(rinv23,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq23,rinv23),_mm256_sub_pd(rinvsq23,felec));
 +
 +            d                = _mm256_sub_pd(r23,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv23,_mm256_mul_pd(velec,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq23,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx23);
 +            ty               = _mm256_mul_pd(fscal,dy23);
 +            tz               = _mm256_mul_pd(fscal,dz23);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq31,rcutoff2))
 +            {
 +
 +            r31              = _mm256_mul_pd(rsq31,rinv31);
 +            r31              = _mm256_andnot_pd(dummy_mask,r31);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r31,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq31,_mm256_sub_pd(rinv31,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq31,rinv31),_mm256_sub_pd(rinvsq31,felec));
 +
 +            d                = _mm256_sub_pd(r31,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv31,_mm256_mul_pd(velec,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq31,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx31);
 +            ty               = _mm256_mul_pd(fscal,dy31);
 +            tz               = _mm256_mul_pd(fscal,dz31);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq32,rcutoff2))
 +            {
 +
 +            r32              = _mm256_mul_pd(rsq32,rinv32);
 +            r32              = _mm256_andnot_pd(dummy_mask,r32);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r32,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq32,_mm256_sub_pd(rinv32,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq32,rinv32),_mm256_sub_pd(rinvsq32,felec));
 +
 +            d                = _mm256_sub_pd(r32,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv32,_mm256_mul_pd(velec,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq32,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx32);
 +            ty               = _mm256_mul_pd(fscal,dy32);
 +            tz               = _mm256_mul_pd(fscal,dz32);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq33,rcutoff2))
 +            {
 +
 +            r33              = _mm256_mul_pd(rsq33,rinv33);
 +            r33              = _mm256_andnot_pd(dummy_mask,r33);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r33,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq33,_mm256_sub_pd(rinv33,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq33,rinv33),_mm256_sub_pd(rinvsq33,felec));
 +
 +            d                = _mm256_sub_pd(r33,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv33,_mm256_mul_pd(velec,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq33,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx33);
 +            ty               = _mm256_mul_pd(fscal,dy33);
 +            tz               = _mm256_mul_pd(fscal,dz33);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            }
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_4rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
 +                                                      fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
 +
 +            /* Inner loop uses 627 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_4atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 24 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4W4_F,outeriter*24 + inneriter*627);
 +}
index d823d26276e5f8df8752186dd3f22ea267b25a19,0000000000000000000000000000000000000000..56c73d84fde8ce51f77cecf07c437af5b672c00c
mode 100644,000000..100644
--- /dev/null
@@@ -1,787 -1,0 +1,787 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/*
 + * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2001-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the Free
 + * Software Foundation; either version 2 of the License, or (at your option) any
 + * later version.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website.
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecEwSw_VdwNone_GeomP1P1_VF_avx_256_double
 + * Electrostatics interaction: Ewald
 + * VdW interaction:            None
 + * Geometry:                   Particle-Particle
 + * Calculate force/pot:        PotentialAndForce
 + */
 +void
 +nb_kernel_ElecEwSw_VdwNone_GeomP1P1_VF_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    __m128i          ewitab;
 +    __m256d          ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
 +    __m256d          beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
 +    real             *ewtab;
 +    __m256d          rswitch,swV3,swV4,swV5,swF2,swF3,swF4,d,d2,sw,dsw;
 +    real             rswitch_scalar,d_scalar;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +
 +    sh_ewald         = _mm256_set1_pd(fr->ic->sh_ewald);
 +    beta             = _mm256_set1_pd(fr->ic->ewaldcoeff);
 +    beta2            = _mm256_mul_pd(beta,beta);
 +    beta3            = _mm256_mul_pd(beta,beta2);
 +
 +    ewtab            = fr->ic->tabq_coul_FDV0;
 +    ewtabscale       = _mm256_set1_pd(fr->ic->tabq_scale);
 +    ewtabhalfspace   = _mm256_set1_pd(0.5/fr->ic->tabq_scale);
 +
 +    /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
 +    rcutoff_scalar   = fr->rcoulomb;
 +    rcutoff          = _mm256_set1_pd(rcutoff_scalar);
 +    rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);
 +
 +    rswitch_scalar   = fr->rcoulomb_switch;
 +    rswitch          = _mm256_set1_pd(rswitch_scalar);
 +    /* Setup switch parameters */
 +    d_scalar         = rcutoff_scalar-rswitch_scalar;
 +    d                = _mm256_set1_pd(d_scalar);
 +    swV3             = _mm256_set1_pd(-10.0/(d_scalar*d_scalar*d_scalar));
 +    swV4             = _mm256_set1_pd( 15.0/(d_scalar*d_scalar*d_scalar*d_scalar));
 +    swV5             = _mm256_set1_pd( -6.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
 +    swF2             = _mm256_set1_pd(-30.0/(d_scalar*d_scalar*d_scalar));
 +    swF3             = _mm256_set1_pd( 60.0/(d_scalar*d_scalar*d_scalar*d_scalar));
 +    swF4             = _mm256_set1_pd(-30.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_1rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +
 +        /* Load parameters for i particles */
 +        iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +
 +        /* Reset potential sums */
 +        velecsum         = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r00,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(rinv00,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));
 +
 +            d                = _mm256_sub_pd(r00,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv00,_mm256_mul_pd(velec,dsw)) );
 +            velec            = _mm256_mul_pd(velec,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
 +
 +            }
 +
 +            /* Inner loop uses 65 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r00,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(rinv00,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));
 +
 +            d                = _mm256_sub_pd(r00,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv00,_mm256_mul_pd(velec,dsw)) );
 +            velec            = _mm256_mul_pd(velec,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
 +
 +            }
 +
 +            /* Inner loop uses 66 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_1atom_swizzle_pd(fix0,fiy0,fiz0,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 8 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VF,outeriter*8 + inneriter*66);
 +}
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecEwSw_VdwNone_GeomP1P1_F_avx_256_double
 + * Electrostatics interaction: Ewald
 + * VdW interaction:            None
 + * Geometry:                   Particle-Particle
 + * Calculate force/pot:        Force
 + */
 +void
 +nb_kernel_ElecEwSw_VdwNone_GeomP1P1_F_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    __m128i          ewitab;
 +    __m256d          ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
 +    __m256d          beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
 +    real             *ewtab;
 +    __m256d          rswitch,swV3,swV4,swV5,swF2,swF3,swF4,d,d2,sw,dsw;
 +    real             rswitch_scalar,d_scalar;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +
 +    sh_ewald         = _mm256_set1_pd(fr->ic->sh_ewald);
 +    beta             = _mm256_set1_pd(fr->ic->ewaldcoeff);
 +    beta2            = _mm256_mul_pd(beta,beta);
 +    beta3            = _mm256_mul_pd(beta,beta2);
 +
 +    ewtab            = fr->ic->tabq_coul_FDV0;
 +    ewtabscale       = _mm256_set1_pd(fr->ic->tabq_scale);
 +    ewtabhalfspace   = _mm256_set1_pd(0.5/fr->ic->tabq_scale);
 +
 +    /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
 +    rcutoff_scalar   = fr->rcoulomb;
 +    rcutoff          = _mm256_set1_pd(rcutoff_scalar);
 +    rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);
 +
 +    rswitch_scalar   = fr->rcoulomb_switch;
 +    rswitch          = _mm256_set1_pd(rswitch_scalar);
 +    /* Precompute polynomial coefficients for the potential-switch function on [rswitch,rcutoff] */
 +    d_scalar         = rcutoff_scalar-rswitch_scalar;
 +    d                = _mm256_set1_pd(d_scalar);
 +    swV3             = _mm256_set1_pd(-10.0/(d_scalar*d_scalar*d_scalar));
 +    swV4             = _mm256_set1_pd( 15.0/(d_scalar*d_scalar*d_scalar*d_scalar));
 +    swV5             = _mm256_set1_pd( -6.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
 +    swF2             = _mm256_set1_pd(-30.0/(d_scalar*d_scalar*d_scalar));
 +    swF3             = _mm256_set1_pd( 60.0/(d_scalar*d_scalar*d_scalar*d_scalar));
 +    swF4             = _mm256_set1_pd(-30.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
 +
 +    /* Initialize to silence spurious "may be used uninitialized" compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_1rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +
 +        /* Load parameters for i particles */
 +        iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +
 +        /* Inner loop over complete 4-wide chunks; stops early when padding (negative) j entries appear */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r00,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(rinv00,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));
 +
 +            d                = _mm256_sub_pd(r00,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv00,_mm256_mul_pd(velec,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
 +
 +            }
 +
 +            /* Inner loop uses 62 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor indices; negative entries mark list padding (dummy atoms) */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r00,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(rinv00,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));
 +
 +            d                = _mm256_sub_pd(r00,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv00,_mm256_mul_pd(velec,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
 +
 +            }
 +
 +            /* Inner loop uses 63 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_1atom_swizzle_pd(fix0,fiy0,fiz0,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 7 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_F,outeriter*7 + inneriter*63);
 +}
index 066de6a24ae39d089bc3db61326d0983504cefa6,0000000000000000000000000000000000000000..b62d9f8193532c3ec7b09df8d986ed6ce7d163ef
mode 100644,000000..100644
--- /dev/null
@@@ -1,1425 -1,0 +1,1425 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/*
 + * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2001-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the Free
 + * Software Foundation; either version 2 of the License, or (at your option) any
 + * later version.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website.
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecEwSw_VdwNone_GeomW3P1_VF_avx_256_double
 + * Electrostatics interaction: Ewald
 + * VdW interaction:            None
 + * Geometry:                   Water3-Particle
 + * Calculate force/pot:        PotentialAndForce
 + */
 +void
 +nb_kernel_ElecEwSw_VdwNone_GeomW3P1_VF_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    __m128i          ewitab;
 +    __m256d          ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
 +    __m256d          beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
 +    real             *ewtab;
 +    __m256d          rswitch,swV3,swV4,swV5,swF2,swF3,swF4,d,d2,sw,dsw;
 +    real             rswitch_scalar,d_scalar;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +
 +    sh_ewald         = _mm256_set1_pd(fr->ic->sh_ewald);
 +    beta             = _mm256_set1_pd(fr->ic->ewaldcoeff);
 +    beta2            = _mm256_mul_pd(beta,beta);
 +    beta3            = _mm256_mul_pd(beta,beta2);
 +
 +    ewtab            = fr->ic->tabq_coul_FDV0;
 +    ewtabscale       = _mm256_set1_pd(fr->ic->tabq_scale);
 +    ewtabhalfspace   = _mm256_set1_pd(0.5/fr->ic->tabq_scale);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +
 +    /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
 +    rcutoff_scalar   = fr->rcoulomb;
 +    rcutoff          = _mm256_set1_pd(rcutoff_scalar);
 +    rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);
 +
 +    rswitch_scalar   = fr->rcoulomb_switch;
 +    rswitch          = _mm256_set1_pd(rswitch_scalar);
 +    /* Setup switch parameters */
 +    d_scalar         = rcutoff_scalar-rswitch_scalar;
 +    d                = _mm256_set1_pd(d_scalar);
 +    swV3             = _mm256_set1_pd(-10.0/(d_scalar*d_scalar*d_scalar));
 +    swV4             = _mm256_set1_pd( 15.0/(d_scalar*d_scalar*d_scalar*d_scalar));
 +    swV5             = _mm256_set1_pd( -6.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
 +    swF2             = _mm256_set1_pd(-30.0/(d_scalar*d_scalar*d_scalar));
 +    swF3             = _mm256_set1_pd( 60.0/(d_scalar*d_scalar*d_scalar*d_scalar));
 +    swF4             = _mm256_set1_pd(-30.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +
 +        /* Reset potential sums */
 +        velecsum         = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r00,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(rinv00,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));
 +
 +            d                = _mm256_sub_pd(r00,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv00,_mm256_mul_pd(velec,dsw)) );
 +            velec            = _mm256_mul_pd(velec,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq10,rcutoff2))
 +            {
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r10,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(rinv10,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
 +
 +            d                = _mm256_sub_pd(r10,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv10,_mm256_mul_pd(velec,dsw)) );
 +            velec            = _mm256_mul_pd(velec,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq20,rcutoff2))
 +            {
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r20,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(rinv20,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
 +
 +            d                = _mm256_sub_pd(r20,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv20,_mm256_mul_pd(velec,dsw)) );
 +            velec            = _mm256_mul_pd(velec,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 198 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r00,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(rinv00,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));
 +
 +            d                = _mm256_sub_pd(r00,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv00,_mm256_mul_pd(velec,dsw)) );
 +            velec            = _mm256_mul_pd(velec,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq10,rcutoff2))
 +            {
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +            r10              = _mm256_andnot_pd(dummy_mask,r10);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r10,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(rinv10,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
 +
 +            d                = _mm256_sub_pd(r10,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv10,_mm256_mul_pd(velec,dsw)) );
 +            velec            = _mm256_mul_pd(velec,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq20,rcutoff2))
 +            {
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +            r20              = _mm256_andnot_pd(dummy_mask,r20);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r20,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(rinv20,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
 +
 +            d                = _mm256_sub_pd(r20,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv20,_mm256_mul_pd(velec,dsw)) );
 +            velec            = _mm256_mul_pd(velec,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 201 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 19 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W3_VF,outeriter*19 + inneriter*201);
 +}
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecEwSw_VdwNone_GeomW3P1_F_avx_256_double
 + * Electrostatics interaction: Ewald
 + * VdW interaction:            None
 + * Geometry:                   Water3-Particle
 + * Calculate force/pot:        Force
 + *
 + * Force-only variant: switched Ewald electrostatics between the three atoms
 + * of an i-water and single j particles, with four j atoms processed per
 + * inner-loop iteration in 256-bit AVX double-precision registers.
 + * NOTE: generated by the avx_256_double kernel generator - fix bugs there,
 + * not by hand-editing this file.
 + */
 +void
 +nb_kernel_ElecEwSw_VdwNone_GeomW3P1_F_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    __m128i          ewitab;
 +    __m256d          ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
 +    __m256d          beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
 +    real             *ewtab;
 +    __m256d          rswitch,swV3,swV4,swV5,swF2,swF3,swF4,d,d2,sw,dsw;
 +    real             rswitch_scalar,d_scalar;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +
 +    sh_ewald         = _mm256_set1_pd(fr->ic->sh_ewald);
 +    beta             = _mm256_set1_pd(fr->ic->ewaldcoeff);
 +    beta2            = _mm256_mul_pd(beta,beta);
 +    beta3            = _mm256_mul_pd(beta,beta2);
 +
 +    ewtab            = fr->ic->tabq_coul_FDV0;
 +    ewtabscale       = _mm256_set1_pd(fr->ic->tabq_scale);
 +    ewtabhalfspace   = _mm256_set1_pd(0.5/fr->ic->tabq_scale);
 +
 +    /* Setup water-specific parameters: i charges are constant over the whole
 +     * list, so they are pre-scaled by epsfac once here. */
 +    inr              = nlist->iinr[0];
 +    iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +
 +    /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
 +    rcutoff_scalar   = fr->rcoulomb;
 +    rcutoff          = _mm256_set1_pd(rcutoff_scalar);
 +    rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);
 +
 +    rswitch_scalar   = fr->rcoulomb_switch;
 +    rswitch          = _mm256_set1_pd(rswitch_scalar);
 +    /* Setup switch parameters: polynomial coefficients for the 5th-order
 +     * switch function and its derivative over the interval [rswitch,rcutoff] */
 +    d_scalar         = rcutoff_scalar-rswitch_scalar;
 +    d                = _mm256_set1_pd(d_scalar);
 +    swV3             = _mm256_set1_pd(-10.0/(d_scalar*d_scalar*d_scalar));
 +    swV4             = _mm256_set1_pd( 15.0/(d_scalar*d_scalar*d_scalar*d_scalar));
 +    swV5             = _mm256_set1_pd( -6.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
 +    swF2             = _mm256_set1_pd(-30.0/(d_scalar*d_scalar*d_scalar));
 +    swF3             = _mm256_set1_pd( 60.0/(d_scalar*d_scalar*d_scalar*d_scalar));
 +    swF4             = _mm256_set1_pd(-30.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    /* scratch receives (and discards) forces written for masked-out dummy
 +     * j entries in the epilogue loop below */
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        /* Negative jjnr entries mark padding; only full quadruplets of real
 +         * atoms are handled here, the remainder falls through to the masked
 +         * epilogue below. */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r00,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            /* index*4: the FDV0 table stores four reals per table point */
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(rinv00,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));
 +
 +            d                = _mm256_sub_pd(r00,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv00,_mm256_mul_pd(velec,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq10,rcutoff2))
 +            {
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r10,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(rinv10,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
 +
 +            d                = _mm256_sub_pd(r10,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv10,_mm256_mul_pd(velec,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq20,rcutoff2))
 +            {
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r20,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(rinv20,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
 +
 +            d                = _mm256_sub_pd(r20,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv20,_mm256_mul_pd(velec,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 189 flops */
 +        }
 +
 +        /* Epilogue: last (partial) quadruplet, with dummy entries masked out */
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
 +            /* NOTE(review): merge fix - cast to ps (not pd) to match the __m128
 +             * type of tmpmask0 */
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            /* Clamp dummy indices to 0 so the coordinate loads stay in bounds */
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            /* zero r in dummy lanes so the Ewald table lookup stays in range */
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r00,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(rinv00,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));
 +
 +            d                = _mm256_sub_pd(r00,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv00,_mm256_mul_pd(velec,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq10,rcutoff2))
 +            {
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +            r10              = _mm256_andnot_pd(dummy_mask,r10);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r10,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(rinv10,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
 +
 +            d                = _mm256_sub_pd(r10,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv10,_mm256_mul_pd(velec,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq20,rcutoff2))
 +            {
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +            r20              = _mm256_andnot_pd(dummy_mask,r20);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r20,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(rinv20,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
 +
 +            d                = _mm256_sub_pd(r20,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv20,_mm256_mul_pd(velec,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /* Dummy lanes scatter their (zeroed) forces into scratch instead of f */
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 192 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 18 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W3_F,outeriter*18 + inneriter*192);
 +}
index f5c09be1c17f7977ddbe5f1c784ce435d52231c4,0000000000000000000000000000000000000000..aaa795111b5770df54775f1a73fe739b09cd5db7
mode 100644,000000..100644
--- /dev/null
@@@ -1,3085 -1,0 +1,3085 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/*
 + * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2001-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the Free
 + * Software Foundation; either version 2 of the License, or (at your option) any
 + * later version.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website.
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecEwSw_VdwNone_GeomW3W3_VF_avx_256_double
 + * Electrostatics interaction: Ewald
 + * VdW interaction:            None
 + * Geometry:                   Water3-Water3
 + * Calculate force/pot:        PotentialAndForce
 + */
 +void
 +nb_kernel_ElecEwSw_VdwNone_GeomW3W3_VF_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
 +    __m256d          jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
 +    int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
 +    __m256d          jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
 +    __m256d          dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
 +    __m256d          dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
 +    __m256d          dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    __m128i          ewitab;
 +    __m256d          ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
 +    __m256d          beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
 +    real             *ewtab;
 +    __m256d          rswitch,swV3,swV4,swV5,swF2,swF3,swF4,d,d2,sw,dsw;
 +    real             rswitch_scalar,d_scalar;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +
 +    sh_ewald         = _mm256_set1_pd(fr->ic->sh_ewald);
 +    beta             = _mm256_set1_pd(fr->ic->ewaldcoeff);
 +    beta2            = _mm256_mul_pd(beta,beta);
 +    beta3            = _mm256_mul_pd(beta,beta2);
 +
 +    ewtab            = fr->ic->tabq_coul_FDV0;
 +    ewtabscale       = _mm256_set1_pd(fr->ic->tabq_scale);
 +    ewtabhalfspace   = _mm256_set1_pd(0.5/fr->ic->tabq_scale);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +
 +    jq0              = _mm256_set1_pd(charge[inr+0]);
 +    jq1              = _mm256_set1_pd(charge[inr+1]);
 +    jq2              = _mm256_set1_pd(charge[inr+2]);
 +    qq00             = _mm256_mul_pd(iq0,jq0);
 +    qq01             = _mm256_mul_pd(iq0,jq1);
 +    qq02             = _mm256_mul_pd(iq0,jq2);
 +    qq10             = _mm256_mul_pd(iq1,jq0);
 +    qq11             = _mm256_mul_pd(iq1,jq1);
 +    qq12             = _mm256_mul_pd(iq1,jq2);
 +    qq20             = _mm256_mul_pd(iq2,jq0);
 +    qq21             = _mm256_mul_pd(iq2,jq1);
 +    qq22             = _mm256_mul_pd(iq2,jq2);
 +
 +    /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
 +    rcutoff_scalar   = fr->rcoulomb;
 +    rcutoff          = _mm256_set1_pd(rcutoff_scalar);
 +    rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);
 +
 +    rswitch_scalar   = fr->rcoulomb_switch;
 +    rswitch          = _mm256_set1_pd(rswitch_scalar);
 +    /* Setup switch parameters */
 +    d_scalar         = rcutoff_scalar-rswitch_scalar;
 +    d                = _mm256_set1_pd(d_scalar);
 +    swV3             = _mm256_set1_pd(-10.0/(d_scalar*d_scalar*d_scalar));
 +    swV4             = _mm256_set1_pd( 15.0/(d_scalar*d_scalar*d_scalar*d_scalar));
 +    swV5             = _mm256_set1_pd( -6.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
 +    swF2             = _mm256_set1_pd(-30.0/(d_scalar*d_scalar*d_scalar));
 +    swF3             = _mm256_set1_pd( 60.0/(d_scalar*d_scalar*d_scalar*d_scalar));
 +    swF4             = _mm256_set1_pd(-30.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +
 +        /* Reset potential sums */
 +        velecsum         = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                              &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx01             = _mm256_sub_pd(ix0,jx1);
 +            dy01             = _mm256_sub_pd(iy0,jy1);
 +            dz01             = _mm256_sub_pd(iz0,jz1);
 +            dx02             = _mm256_sub_pd(ix0,jx2);
 +            dy02             = _mm256_sub_pd(iy0,jy2);
 +            dz02             = _mm256_sub_pd(iz0,jz2);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
 +            rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv01           = gmx_mm256_invsqrt_pd(rsq01);
 +            rinv02           = gmx_mm256_invsqrt_pd(rsq02);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq01         = _mm256_mul_pd(rinv01,rinv01);
 +            rinvsq02         = _mm256_mul_pd(rinv02,rinv02);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r00,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(rinv00,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));
 +
 +            d                = _mm256_sub_pd(r00,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv00,_mm256_mul_pd(velec,dsw)) );
 +            velec            = _mm256_mul_pd(velec,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq01,rcutoff2))
 +            {
 +
 +            r01              = _mm256_mul_pd(rsq01,rinv01);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r01,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq01,_mm256_sub_pd(rinv01,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq01,rinv01),_mm256_sub_pd(rinvsq01,felec));
 +
 +            d                = _mm256_sub_pd(r01,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv01,_mm256_mul_pd(velec,dsw)) );
 +            velec            = _mm256_mul_pd(velec,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq01,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx01);
 +            ty               = _mm256_mul_pd(fscal,dy01);
 +            tz               = _mm256_mul_pd(fscal,dz01);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq02,rcutoff2))
 +            {
 +
 +            r02              = _mm256_mul_pd(rsq02,rinv02);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r02,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq02,_mm256_sub_pd(rinv02,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq02,rinv02),_mm256_sub_pd(rinvsq02,felec));
 +
 +            d                = _mm256_sub_pd(r02,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv02,_mm256_mul_pd(velec,dsw)) );
 +            velec            = _mm256_mul_pd(velec,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq02,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx02);
 +            ty               = _mm256_mul_pd(fscal,dy02);
 +            tz               = _mm256_mul_pd(fscal,dz02);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq10,rcutoff2))
 +            {
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r10,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(rinv10,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
 +
 +            d                = _mm256_sub_pd(r10,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv10,_mm256_mul_pd(velec,dsw)) );
 +            velec            = _mm256_mul_pd(velec,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq11,rcutoff2))
 +            {
 +
 +            r11              = _mm256_mul_pd(rsq11,rinv11);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r11,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq11,_mm256_sub_pd(rinv11,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq11,rinv11),_mm256_sub_pd(rinvsq11,felec));
 +
 +            d                = _mm256_sub_pd(r11,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv11,_mm256_mul_pd(velec,dsw)) );
 +            velec            = _mm256_mul_pd(velec,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq12,rcutoff2))
 +            {
 +
 +            r12              = _mm256_mul_pd(rsq12,rinv12);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r12,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq12,_mm256_sub_pd(rinv12,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq12,rinv12),_mm256_sub_pd(rinvsq12,felec));
 +
 +            d                = _mm256_sub_pd(r12,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv12,_mm256_mul_pd(velec,dsw)) );
 +            velec            = _mm256_mul_pd(velec,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq20,rcutoff2))
 +            {
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r20,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(rinv20,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
 +
 +            d                = _mm256_sub_pd(r20,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv20,_mm256_mul_pd(velec,dsw)) );
 +            velec            = _mm256_mul_pd(velec,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq21,rcutoff2))
 +            {
 +
 +            r21              = _mm256_mul_pd(rsq21,rinv21);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r21,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq21,_mm256_sub_pd(rinv21,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq21,rinv21),_mm256_sub_pd(rinvsq21,felec));
 +
 +            d                = _mm256_sub_pd(r21,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv21,_mm256_mul_pd(velec,dsw)) );
 +            velec            = _mm256_mul_pd(velec,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq22,rcutoff2))
 +            {
 +
 +            r22              = _mm256_mul_pd(rsq22,rinv22);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r22,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq22,_mm256_sub_pd(rinv22,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq22,rinv22),_mm256_sub_pd(rinvsq22,felec));
 +
 +            d                = _mm256_sub_pd(r22,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv22,_mm256_mul_pd(velec,dsw)) );
 +            velec            = _mm256_mul_pd(velec,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
 +
 +            /* Inner loop uses 585 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                              &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx01             = _mm256_sub_pd(ix0,jx1);
 +            dy01             = _mm256_sub_pd(iy0,jy1);
 +            dz01             = _mm256_sub_pd(iz0,jz1);
 +            dx02             = _mm256_sub_pd(ix0,jx2);
 +            dy02             = _mm256_sub_pd(iy0,jy2);
 +            dz02             = _mm256_sub_pd(iz0,jz2);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
 +            rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv01           = gmx_mm256_invsqrt_pd(rsq01);
 +            rinv02           = gmx_mm256_invsqrt_pd(rsq02);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq01         = _mm256_mul_pd(rinv01,rinv01);
 +            rinvsq02         = _mm256_mul_pd(rinv02,rinv02);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r00,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(rinv00,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));
 +
 +            d                = _mm256_sub_pd(r00,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv00,_mm256_mul_pd(velec,dsw)) );
 +            velec            = _mm256_mul_pd(velec,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq01,rcutoff2))
 +            {
 +
 +            r01              = _mm256_mul_pd(rsq01,rinv01);
 +            r01              = _mm256_andnot_pd(dummy_mask,r01);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r01,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq01,_mm256_sub_pd(rinv01,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq01,rinv01),_mm256_sub_pd(rinvsq01,felec));
 +
 +            d                = _mm256_sub_pd(r01,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv01,_mm256_mul_pd(velec,dsw)) );
 +            velec            = _mm256_mul_pd(velec,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq01,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx01);
 +            ty               = _mm256_mul_pd(fscal,dy01);
 +            tz               = _mm256_mul_pd(fscal,dz01);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq02,rcutoff2))
 +            {
 +
 +            r02              = _mm256_mul_pd(rsq02,rinv02);
 +            r02              = _mm256_andnot_pd(dummy_mask,r02);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r02,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq02,_mm256_sub_pd(rinv02,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq02,rinv02),_mm256_sub_pd(rinvsq02,felec));
 +
 +            d                = _mm256_sub_pd(r02,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv02,_mm256_mul_pd(velec,dsw)) );
 +            velec            = _mm256_mul_pd(velec,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq02,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx02);
 +            ty               = _mm256_mul_pd(fscal,dy02);
 +            tz               = _mm256_mul_pd(fscal,dz02);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq10,rcutoff2))
 +            {
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +            r10              = _mm256_andnot_pd(dummy_mask,r10);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r10,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(rinv10,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
 +
 +            d                = _mm256_sub_pd(r10,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv10,_mm256_mul_pd(velec,dsw)) );
 +            velec            = _mm256_mul_pd(velec,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq11,rcutoff2))
 +            {
 +
 +            r11              = _mm256_mul_pd(rsq11,rinv11);
 +            r11              = _mm256_andnot_pd(dummy_mask,r11);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r11,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq11,_mm256_sub_pd(rinv11,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq11,rinv11),_mm256_sub_pd(rinvsq11,felec));
 +
 +            d                = _mm256_sub_pd(r11,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv11,_mm256_mul_pd(velec,dsw)) );
 +            velec            = _mm256_mul_pd(velec,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq12,rcutoff2))
 +            {
 +
 +            r12              = _mm256_mul_pd(rsq12,rinv12);
 +            r12              = _mm256_andnot_pd(dummy_mask,r12);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r12,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq12,_mm256_sub_pd(rinv12,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq12,rinv12),_mm256_sub_pd(rinvsq12,felec));
 +
 +            d                = _mm256_sub_pd(r12,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv12,_mm256_mul_pd(velec,dsw)) );
 +            velec            = _mm256_mul_pd(velec,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq20,rcutoff2))
 +            {
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +            r20              = _mm256_andnot_pd(dummy_mask,r20);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r20,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(rinv20,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
 +
 +            d                = _mm256_sub_pd(r20,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv20,_mm256_mul_pd(velec,dsw)) );
 +            velec            = _mm256_mul_pd(velec,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq21,rcutoff2))
 +            {
 +
 +            r21              = _mm256_mul_pd(rsq21,rinv21);
 +            r21              = _mm256_andnot_pd(dummy_mask,r21);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r21,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq21,_mm256_sub_pd(rinv21,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq21,rinv21),_mm256_sub_pd(rinvsq21,felec));
 +
 +            d                = _mm256_sub_pd(r21,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv21,_mm256_mul_pd(velec,dsw)) );
 +            velec            = _mm256_mul_pd(velec,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq22,rcutoff2))
 +            {
 +
 +            r22              = _mm256_mul_pd(rsq22,rinv22);
 +            r22              = _mm256_andnot_pd(dummy_mask,r22);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r22,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq22,_mm256_sub_pd(rinv22,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq22,rinv22),_mm256_sub_pd(rinvsq22,felec));
 +
 +            d                = _mm256_sub_pd(r22,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv22,_mm256_mul_pd(velec,dsw)) );
 +            velec            = _mm256_mul_pd(velec,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
 +
 +            /* Inner loop uses 594 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 19 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W3W3_VF,outeriter*19 + inneriter*594);
 +}
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecEwSw_VdwNone_GeomW3W3_F_avx_256_double
 + * Electrostatics interaction: Ewald
 + * VdW interaction:            None
 + * Geometry:                   Water3-Water3
 + * Calculate force/pot:        Force
 + */
 +void
 +nb_kernel_ElecEwSw_VdwNone_GeomW3W3_F_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
 +    __m256d          jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
 +    int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
 +    __m256d          jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
 +    __m256d          dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
 +    __m256d          dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
 +    __m256d          dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    __m128i          ewitab;
 +    __m256d          ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
 +    __m256d          beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
 +    real             *ewtab;
 +    __m256d          rswitch,swV3,swV4,swV5,swF2,swF3,swF4,d,d2,sw,dsw;
 +    real             rswitch_scalar,d_scalar;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +
 +    sh_ewald         = _mm256_set1_pd(fr->ic->sh_ewald);
 +    beta             = _mm256_set1_pd(fr->ic->ewaldcoeff);
 +    beta2            = _mm256_mul_pd(beta,beta);
 +    beta3            = _mm256_mul_pd(beta,beta2);
 +
 +    ewtab            = fr->ic->tabq_coul_FDV0;
 +    ewtabscale       = _mm256_set1_pd(fr->ic->tabq_scale);
 +    ewtabhalfspace   = _mm256_set1_pd(0.5/fr->ic->tabq_scale);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +
 +    jq0              = _mm256_set1_pd(charge[inr+0]);
 +    jq1              = _mm256_set1_pd(charge[inr+1]);
 +    jq2              = _mm256_set1_pd(charge[inr+2]);
 +    qq00             = _mm256_mul_pd(iq0,jq0);
 +    qq01             = _mm256_mul_pd(iq0,jq1);
 +    qq02             = _mm256_mul_pd(iq0,jq2);
 +    qq10             = _mm256_mul_pd(iq1,jq0);
 +    qq11             = _mm256_mul_pd(iq1,jq1);
 +    qq12             = _mm256_mul_pd(iq1,jq2);
 +    qq20             = _mm256_mul_pd(iq2,jq0);
 +    qq21             = _mm256_mul_pd(iq2,jq1);
 +    qq22             = _mm256_mul_pd(iq2,jq2);
 +
 +    /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
 +    rcutoff_scalar   = fr->rcoulomb;
 +    rcutoff          = _mm256_set1_pd(rcutoff_scalar);
 +    rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);
 +
 +    rswitch_scalar   = fr->rcoulomb_switch;
 +    rswitch          = _mm256_set1_pd(rswitch_scalar);
 +    /* Setup switch parameters */
 +    d_scalar         = rcutoff_scalar-rswitch_scalar;
 +    d                = _mm256_set1_pd(d_scalar);
 +    swV3             = _mm256_set1_pd(-10.0/(d_scalar*d_scalar*d_scalar));
 +    swV4             = _mm256_set1_pd( 15.0/(d_scalar*d_scalar*d_scalar*d_scalar));
 +    swV5             = _mm256_set1_pd( -6.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
 +    swF2             = _mm256_set1_pd(-30.0/(d_scalar*d_scalar*d_scalar));
 +    swF3             = _mm256_set1_pd( 60.0/(d_scalar*d_scalar*d_scalar*d_scalar));
 +    swF4             = _mm256_set1_pd(-30.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                              &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx01             = _mm256_sub_pd(ix0,jx1);
 +            dy01             = _mm256_sub_pd(iy0,jy1);
 +            dz01             = _mm256_sub_pd(iz0,jz1);
 +            dx02             = _mm256_sub_pd(ix0,jx2);
 +            dy02             = _mm256_sub_pd(iy0,jy2);
 +            dz02             = _mm256_sub_pd(iz0,jz2);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
 +            rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv01           = gmx_mm256_invsqrt_pd(rsq01);
 +            rinv02           = gmx_mm256_invsqrt_pd(rsq02);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq01         = _mm256_mul_pd(rinv01,rinv01);
 +            rinvsq02         = _mm256_mul_pd(rinv02,rinv02);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r00,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(rinv00,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));
 +
 +            d                = _mm256_sub_pd(r00,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv00,_mm256_mul_pd(velec,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq01,rcutoff2))
 +            {
 +
 +            r01              = _mm256_mul_pd(rsq01,rinv01);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r01,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq01,_mm256_sub_pd(rinv01,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq01,rinv01),_mm256_sub_pd(rinvsq01,felec));
 +
 +            d                = _mm256_sub_pd(r01,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv01,_mm256_mul_pd(velec,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq01,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx01);
 +            ty               = _mm256_mul_pd(fscal,dy01);
 +            tz               = _mm256_mul_pd(fscal,dz01);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq02,rcutoff2))
 +            {
 +
 +            r02              = _mm256_mul_pd(rsq02,rinv02);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r02,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq02,_mm256_sub_pd(rinv02,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq02,rinv02),_mm256_sub_pd(rinvsq02,felec));
 +
 +            d                = _mm256_sub_pd(r02,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv02,_mm256_mul_pd(velec,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq02,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx02);
 +            ty               = _mm256_mul_pd(fscal,dy02);
 +            tz               = _mm256_mul_pd(fscal,dz02);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq10,rcutoff2))
 +            {
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r10,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(rinv10,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
 +
 +            d                = _mm256_sub_pd(r10,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv10,_mm256_mul_pd(velec,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq11,rcutoff2))
 +            {
 +
 +            r11              = _mm256_mul_pd(rsq11,rinv11);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r11,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq11,_mm256_sub_pd(rinv11,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq11,rinv11),_mm256_sub_pd(rinvsq11,felec));
 +
 +            d                = _mm256_sub_pd(r11,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv11,_mm256_mul_pd(velec,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq12,rcutoff2))
 +            {
 +
 +            r12              = _mm256_mul_pd(rsq12,rinv12);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r12,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq12,_mm256_sub_pd(rinv12,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq12,rinv12),_mm256_sub_pd(rinvsq12,felec));
 +
 +            d                = _mm256_sub_pd(r12,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv12,_mm256_mul_pd(velec,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq20,rcutoff2))
 +            {
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r20,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(rinv20,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
 +
 +            d                = _mm256_sub_pd(r20,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv20,_mm256_mul_pd(velec,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq21,rcutoff2))
 +            {
 +
 +            r21              = _mm256_mul_pd(rsq21,rinv21);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r21,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq21,_mm256_sub_pd(rinv21,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq21,rinv21),_mm256_sub_pd(rinvsq21,felec));
 +
 +            d                = _mm256_sub_pd(r21,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv21,_mm256_mul_pd(velec,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq22,rcutoff2))
 +            {
 +
 +            r22              = _mm256_mul_pd(rsq22,rinv22);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r22,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq22,_mm256_sub_pd(rinv22,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq22,rinv22),_mm256_sub_pd(rinvsq22,felec));
 +
 +            d                = _mm256_sub_pd(r22,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv22,_mm256_mul_pd(velec,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
 +
 +            /* Inner loop uses 558 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                              &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx01             = _mm256_sub_pd(ix0,jx1);
 +            dy01             = _mm256_sub_pd(iy0,jy1);
 +            dz01             = _mm256_sub_pd(iz0,jz1);
 +            dx02             = _mm256_sub_pd(ix0,jx2);
 +            dy02             = _mm256_sub_pd(iy0,jy2);
 +            dz02             = _mm256_sub_pd(iz0,jz2);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
 +            rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv01           = gmx_mm256_invsqrt_pd(rsq01);
 +            rinv02           = gmx_mm256_invsqrt_pd(rsq02);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq01         = _mm256_mul_pd(rinv01,rinv01);
 +            rinvsq02         = _mm256_mul_pd(rinv02,rinv02);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r00,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(rinv00,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));
 +
 +            d                = _mm256_sub_pd(r00,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv00,_mm256_mul_pd(velec,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq01,rcutoff2))
 +            {
 +
 +            r01              = _mm256_mul_pd(rsq01,rinv01);
 +            r01              = _mm256_andnot_pd(dummy_mask,r01);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r01,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq01,_mm256_sub_pd(rinv01,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq01,rinv01),_mm256_sub_pd(rinvsq01,felec));
 +
 +            d                = _mm256_sub_pd(r01,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv01,_mm256_mul_pd(velec,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq01,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx01);
 +            ty               = _mm256_mul_pd(fscal,dy01);
 +            tz               = _mm256_mul_pd(fscal,dz01);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq02,rcutoff2))
 +            {
 +
 +            r02              = _mm256_mul_pd(rsq02,rinv02);
 +            r02              = _mm256_andnot_pd(dummy_mask,r02);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r02,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq02,_mm256_sub_pd(rinv02,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq02,rinv02),_mm256_sub_pd(rinvsq02,felec));
 +
 +            d                = _mm256_sub_pd(r02,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv02,_mm256_mul_pd(velec,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq02,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx02);
 +            ty               = _mm256_mul_pd(fscal,dy02);
 +            tz               = _mm256_mul_pd(fscal,dz02);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq10,rcutoff2))
 +            {
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +            r10              = _mm256_andnot_pd(dummy_mask,r10);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r10,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(rinv10,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
 +
 +            d                = _mm256_sub_pd(r10,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv10,_mm256_mul_pd(velec,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq11,rcutoff2))
 +            {
 +
 +            r11              = _mm256_mul_pd(rsq11,rinv11);
 +            r11              = _mm256_andnot_pd(dummy_mask,r11);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r11,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq11,_mm256_sub_pd(rinv11,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq11,rinv11),_mm256_sub_pd(rinvsq11,felec));
 +
 +            d                = _mm256_sub_pd(r11,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv11,_mm256_mul_pd(velec,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq12,rcutoff2))
 +            {
 +
 +            r12              = _mm256_mul_pd(rsq12,rinv12);
 +            r12              = _mm256_andnot_pd(dummy_mask,r12);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r12,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq12,_mm256_sub_pd(rinv12,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq12,rinv12),_mm256_sub_pd(rinvsq12,felec));
 +
 +            d                = _mm256_sub_pd(r12,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv12,_mm256_mul_pd(velec,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq20,rcutoff2))
 +            {
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +            r20              = _mm256_andnot_pd(dummy_mask,r20);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r20,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(rinv20,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
 +
 +            d                = _mm256_sub_pd(r20,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv20,_mm256_mul_pd(velec,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq21,rcutoff2))
 +            {
 +
 +            r21              = _mm256_mul_pd(rsq21,rinv21);
 +            r21              = _mm256_andnot_pd(dummy_mask,r21);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r21,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq21,_mm256_sub_pd(rinv21,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq21,rinv21),_mm256_sub_pd(rinvsq21,felec));
 +
 +            d                = _mm256_sub_pd(r21,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv21,_mm256_mul_pd(velec,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq22,rcutoff2))
 +            {
 +
 +            r22              = _mm256_mul_pd(rsq22,rinv22);
 +            r22              = _mm256_andnot_pd(dummy_mask,r22);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r22,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq22,_mm256_sub_pd(rinv22,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq22,rinv22),_mm256_sub_pd(rinvsq22,felec));
 +
 +            d                = _mm256_sub_pd(r22,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv22,_mm256_mul_pd(velec,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
 +
 +            /* Inner loop uses 567 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 18 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W3W3_F,outeriter*18 + inneriter*567);
 +}
index c4b8a1d02fdcd43bb3d07a2b1524403871ad7751,0000000000000000000000000000000000000000..d47e204cab6ed015694e4c07ae4efe0273f5a200
mode 100644,000000..100644
--- /dev/null
@@@ -1,1425 -1,0 +1,1425 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/*
 + * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2001-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the Free
 + * Software Foundation; either version 2 of the License, or (at your option) any
 + * later version.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website.
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecEwSw_VdwNone_GeomW4P1_VF_avx_256_double
 + * Electrostatics interaction: Ewald
 + * VdW interaction:            None
 + * Geometry:                   Water4-Particle
 + * Calculate force/pot:        PotentialAndForce
 + */
 +void
 +nb_kernel_ElecEwSw_VdwNone_GeomW4P1_VF_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    real *           vdwioffsetptr3;
 +    __m256d          ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          dx30,dy30,dz30,rsq30,rinv30,rinvsq30,r30,qq30,c6_30,c12_30;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    __m128i          ewitab;
 +    __m256d          ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
 +    __m256d          beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
 +    real             *ewtab;
 +    __m256d          rswitch,swV3,swV4,swV5,swF2,swF3,swF4,d,d2,sw,dsw;
 +    real             rswitch_scalar,d_scalar;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +
 +    sh_ewald         = _mm256_set1_pd(fr->ic->sh_ewald);
 +    beta             = _mm256_set1_pd(fr->ic->ewaldcoeff);
 +    beta2            = _mm256_mul_pd(beta,beta);
 +    beta3            = _mm256_mul_pd(beta,beta2);
 +
 +    ewtab            = fr->ic->tabq_coul_FDV0;
 +    ewtabscale       = _mm256_set1_pd(fr->ic->tabq_scale);
 +    ewtabhalfspace   = _mm256_set1_pd(0.5/fr->ic->tabq_scale);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    iq3              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+3]));
 +
 +    /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
 +    rcutoff_scalar   = fr->rcoulomb;
 +    rcutoff          = _mm256_set1_pd(rcutoff_scalar);
 +    rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);
 +
 +    rswitch_scalar   = fr->rcoulomb_switch;
 +    rswitch          = _mm256_set1_pd(rswitch_scalar);
 +    /* Setup switch parameters */
 +    d_scalar         = rcutoff_scalar-rswitch_scalar;
 +    d                = _mm256_set1_pd(d_scalar);
 +    swV3             = _mm256_set1_pd(-10.0/(d_scalar*d_scalar*d_scalar));
 +    swV4             = _mm256_set1_pd( 15.0/(d_scalar*d_scalar*d_scalar*d_scalar));
 +    swV5             = _mm256_set1_pd( -6.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
 +    swF2             = _mm256_set1_pd(-30.0/(d_scalar*d_scalar*d_scalar));
 +    swF3             = _mm256_set1_pd( 60.0/(d_scalar*d_scalar*d_scalar*d_scalar));
 +    swF4             = _mm256_set1_pd(-30.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset+DIM,
 +                                                    &ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
 +
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +        fix3             = _mm256_setzero_pd();
 +        fiy3             = _mm256_setzero_pd();
 +        fiz3             = _mm256_setzero_pd();
 +
 +        /* Reset potential sums */
 +        velecsum         = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx30             = _mm256_sub_pd(ix3,jx0);
 +            dy30             = _mm256_sub_pd(iy3,jy0);
 +            dz30             = _mm256_sub_pd(iz3,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq30            = gmx_mm256_calc_rsq_pd(dx30,dy30,dz30);
 +
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv30           = gmx_mm256_invsqrt_pd(rsq30);
 +
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq30         = _mm256_mul_pd(rinv30,rinv30);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq10,rcutoff2))
 +            {
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r10,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(rinv10,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
 +
 +            d                = _mm256_sub_pd(r10,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv10,_mm256_mul_pd(velec,dsw)) );
 +            velec            = _mm256_mul_pd(velec,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq20,rcutoff2))
 +            {
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r20,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(rinv20,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
 +
 +            d                = _mm256_sub_pd(r20,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv20,_mm256_mul_pd(velec,dsw)) );
 +            velec            = _mm256_mul_pd(velec,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq30,rcutoff2))
 +            {
 +
 +            r30              = _mm256_mul_pd(rsq30,rinv30);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq30             = _mm256_mul_pd(iq3,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r30,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq30,_mm256_sub_pd(rinv30,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq30,rinv30),_mm256_sub_pd(rinvsq30,felec));
 +
 +            d                = _mm256_sub_pd(r30,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv30,_mm256_mul_pd(velec,dsw)) );
 +            velec            = _mm256_mul_pd(velec,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq30,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx30);
 +            ty               = _mm256_mul_pd(fscal,dy30);
 +            tz               = _mm256_mul_pd(fscal,dz30);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 198 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx30             = _mm256_sub_pd(ix3,jx0);
 +            dy30             = _mm256_sub_pd(iy3,jy0);
 +            dz30             = _mm256_sub_pd(iz3,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq30            = gmx_mm256_calc_rsq_pd(dx30,dy30,dz30);
 +
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv30           = gmx_mm256_invsqrt_pd(rsq30);
 +
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq30         = _mm256_mul_pd(rinv30,rinv30);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq10,rcutoff2))
 +            {
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +            r10              = _mm256_andnot_pd(dummy_mask,r10);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r10,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(rinv10,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
 +
 +            d                = _mm256_sub_pd(r10,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv10,_mm256_mul_pd(velec,dsw)) );
 +            velec            = _mm256_mul_pd(velec,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq20,rcutoff2))
 +            {
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +            r20              = _mm256_andnot_pd(dummy_mask,r20);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r20,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(rinv20,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
 +
 +            d                = _mm256_sub_pd(r20,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv20,_mm256_mul_pd(velec,dsw)) );
 +            velec            = _mm256_mul_pd(velec,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq30,rcutoff2))
 +            {
 +
 +            r30              = _mm256_mul_pd(rsq30,rinv30);
 +            r30              = _mm256_andnot_pd(dummy_mask,r30);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq30             = _mm256_mul_pd(iq3,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r30,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq30,_mm256_sub_pd(rinv30,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq30,rinv30),_mm256_sub_pd(rinvsq30,felec));
 +
 +            d                = _mm256_sub_pd(r30,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv30,_mm256_mul_pd(velec,dsw)) );
 +            velec            = _mm256_mul_pd(velec,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq30,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx30);
 +            ty               = _mm256_mul_pd(fscal,dy30);
 +            tz               = _mm256_mul_pd(fscal,dz30);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 201 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
 +                                                 f+i_coord_offset+DIM,fshift+i_shift_offset);
 +
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 19 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W4_VF,outeriter*19 + inneriter*201);
 +}
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecEwSw_VdwNone_GeomW4P1_F_avx_256_double
 + * Electrostatics interaction: Ewald
 + * VdW interaction:            None
 + * Geometry:                   Water4-Particle
 + * Calculate force/pot:        Force
 + */
 +void
 +nb_kernel_ElecEwSw_VdwNone_GeomW4P1_F_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    real *           vdwioffsetptr3;
 +    __m256d          ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          dx30,dy30,dz30,rsq30,rinv30,rinvsq30,r30,qq30,c6_30,c12_30;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    __m128i          ewitab;
 +    __m256d          ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
 +    __m256d          beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
 +    real             *ewtab;
 +    __m256d          rswitch,swV3,swV4,swV5,swF2,swF3,swF4,d,d2,sw,dsw;
 +    real             rswitch_scalar,d_scalar;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +
 +    sh_ewald         = _mm256_set1_pd(fr->ic->sh_ewald);
 +    beta             = _mm256_set1_pd(fr->ic->ewaldcoeff);
 +    beta2            = _mm256_mul_pd(beta,beta);
 +    beta3            = _mm256_mul_pd(beta,beta2);
 +
 +    ewtab            = fr->ic->tabq_coul_FDV0;
 +    ewtabscale       = _mm256_set1_pd(fr->ic->tabq_scale);
 +    ewtabhalfspace   = _mm256_set1_pd(0.5/fr->ic->tabq_scale);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    iq3              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+3]));
 +
 +    /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
 +    rcutoff_scalar   = fr->rcoulomb;
 +    rcutoff          = _mm256_set1_pd(rcutoff_scalar);
 +    rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);
 +
 +    rswitch_scalar   = fr->rcoulomb_switch;
 +    rswitch          = _mm256_set1_pd(rswitch_scalar);
 +    /* Setup switch parameters */
 +    d_scalar         = rcutoff_scalar-rswitch_scalar;
 +    d                = _mm256_set1_pd(d_scalar);
 +    swV3             = _mm256_set1_pd(-10.0/(d_scalar*d_scalar*d_scalar));
 +    swV4             = _mm256_set1_pd( 15.0/(d_scalar*d_scalar*d_scalar*d_scalar));
 +    swV5             = _mm256_set1_pd( -6.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
 +    swF2             = _mm256_set1_pd(-30.0/(d_scalar*d_scalar*d_scalar));
 +    swF3             = _mm256_set1_pd( 60.0/(d_scalar*d_scalar*d_scalar*d_scalar));
 +    swF4             = _mm256_set1_pd(-30.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset+DIM,
 +                                                    &ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
 +
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +        fix3             = _mm256_setzero_pd();
 +        fiy3             = _mm256_setzero_pd();
 +        fiz3             = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx30             = _mm256_sub_pd(ix3,jx0);
 +            dy30             = _mm256_sub_pd(iy3,jy0);
 +            dz30             = _mm256_sub_pd(iz3,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq30            = gmx_mm256_calc_rsq_pd(dx30,dy30,dz30);
 +
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv30           = gmx_mm256_invsqrt_pd(rsq30);
 +
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq30         = _mm256_mul_pd(rinv30,rinv30);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq10,rcutoff2))
 +            {
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r10,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(rinv10,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
 +
 +            d                = _mm256_sub_pd(r10,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv10,_mm256_mul_pd(velec,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq20,rcutoff2))
 +            {
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r20,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(rinv20,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
 +
 +            d                = _mm256_sub_pd(r20,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv20,_mm256_mul_pd(velec,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq30,rcutoff2))
 +            {
 +
 +            r30              = _mm256_mul_pd(rsq30,rinv30);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq30             = _mm256_mul_pd(iq3,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r30,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq30,_mm256_sub_pd(rinv30,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq30,rinv30),_mm256_sub_pd(rinvsq30,felec));
 +
 +            d                = _mm256_sub_pd(r30,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv30,_mm256_mul_pd(velec,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq30,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx30);
 +            ty               = _mm256_mul_pd(fscal,dy30);
 +            tz               = _mm256_mul_pd(fscal,dz30);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 189 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx30             = _mm256_sub_pd(ix3,jx0);
 +            dy30             = _mm256_sub_pd(iy3,jy0);
 +            dz30             = _mm256_sub_pd(iz3,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq30            = gmx_mm256_calc_rsq_pd(dx30,dy30,dz30);
 +
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv30           = gmx_mm256_invsqrt_pd(rsq30);
 +
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq30         = _mm256_mul_pd(rinv30,rinv30);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq10,rcutoff2))
 +            {
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +            r10              = _mm256_andnot_pd(dummy_mask,r10);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r10,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(rinv10,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
 +
 +            d                = _mm256_sub_pd(r10,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv10,_mm256_mul_pd(velec,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq20,rcutoff2))
 +            {
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +            r20              = _mm256_andnot_pd(dummy_mask,r20);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r20,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(rinv20,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
 +
 +            d                = _mm256_sub_pd(r20,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv20,_mm256_mul_pd(velec,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq30,rcutoff2))
 +            {
 +
 +            r30              = _mm256_mul_pd(rsq30,rinv30);
 +            r30              = _mm256_andnot_pd(dummy_mask,r30);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq30             = _mm256_mul_pd(iq3,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r30,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq30,_mm256_sub_pd(rinv30,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq30,rinv30),_mm256_sub_pd(rinvsq30,felec));
 +
 +            d                = _mm256_sub_pd(r30,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv30,_mm256_mul_pd(velec,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq30,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx30);
 +            ty               = _mm256_mul_pd(fscal,dy30);
 +            tz               = _mm256_mul_pd(fscal,dz30);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 192 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
 +                                                 f+i_coord_offset+DIM,fshift+i_shift_offset);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 18 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W4_F,outeriter*18 + inneriter*192);
 +}
index 6187e5654f18d7bb62c521d5e0c68bce882d3c24,0000000000000000000000000000000000000000..9119c8adb302ea02a7ba3953942b8d11f6308bb7
mode 100644,000000..100644
--- /dev/null
@@@ -1,3085 -1,0 +1,3085 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/*
 + * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2001-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the Free
 + * Software Foundation; either version 2 of the License, or (at your option) any
 + * later version.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website.
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecEwSw_VdwNone_GeomW4W4_VF_avx_256_double
 + * Electrostatics interaction: Ewald
 + * VdW interaction:            None
 + * Geometry:                   Water4-Water4
 + * Calculate force/pot:        PotentialAndForce
 + */
 +void
 +nb_kernel_ElecEwSw_VdwNone_GeomW4W4_VF_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    real *           vdwioffsetptr3;
 +    __m256d          ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
 +    int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
 +    __m256d          jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
 +    int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
 +    __m256d          jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
 +    int              vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D;
 +    __m256d          jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
 +    __m256d          dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
 +    __m256d          dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
 +    __m256d          dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
 +    __m256d          dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
 +    __m256d          dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
 +    __m256d          dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
 +    __m256d          dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
 +    __m256d          dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
 +    __m256d          dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    __m128i          ewitab;
 +    __m256d          ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
 +    __m256d          beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
 +    real             *ewtab;
 +    __m256d          rswitch,swV3,swV4,swV5,swF2,swF3,swF4,d,d2,sw,dsw;
 +    real             rswitch_scalar,d_scalar;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +
 +    sh_ewald         = _mm256_set1_pd(fr->ic->sh_ewald);
 +    beta             = _mm256_set1_pd(fr->ic->ewaldcoeff);
 +    beta2            = _mm256_mul_pd(beta,beta);
 +    beta3            = _mm256_mul_pd(beta,beta2);
 +
 +    ewtab            = fr->ic->tabq_coul_FDV0;
 +    ewtabscale       = _mm256_set1_pd(fr->ic->tabq_scale);
 +    ewtabhalfspace   = _mm256_set1_pd(0.5/fr->ic->tabq_scale);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    iq3              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+3]));
 +
 +    jq1              = _mm256_set1_pd(charge[inr+1]);
 +    jq2              = _mm256_set1_pd(charge[inr+2]);
 +    jq3              = _mm256_set1_pd(charge[inr+3]);
 +    qq11             = _mm256_mul_pd(iq1,jq1);
 +    qq12             = _mm256_mul_pd(iq1,jq2);
 +    qq13             = _mm256_mul_pd(iq1,jq3);
 +    qq21             = _mm256_mul_pd(iq2,jq1);
 +    qq22             = _mm256_mul_pd(iq2,jq2);
 +    qq23             = _mm256_mul_pd(iq2,jq3);
 +    qq31             = _mm256_mul_pd(iq3,jq1);
 +    qq32             = _mm256_mul_pd(iq3,jq2);
 +    qq33             = _mm256_mul_pd(iq3,jq3);
 +
 +    /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
 +    rcutoff_scalar   = fr->rcoulomb;
 +    rcutoff          = _mm256_set1_pd(rcutoff_scalar);
 +    rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);
 +
 +    rswitch_scalar   = fr->rcoulomb_switch;
 +    rswitch          = _mm256_set1_pd(rswitch_scalar);
 +    /* Setup switch parameters */
 +    d_scalar         = rcutoff_scalar-rswitch_scalar;
 +    d                = _mm256_set1_pd(d_scalar);
 +    swV3             = _mm256_set1_pd(-10.0/(d_scalar*d_scalar*d_scalar));
 +    swV4             = _mm256_set1_pd( 15.0/(d_scalar*d_scalar*d_scalar*d_scalar));
 +    swV5             = _mm256_set1_pd( -6.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
 +    swF2             = _mm256_set1_pd(-30.0/(d_scalar*d_scalar*d_scalar));
 +    swF3             = _mm256_set1_pd( 60.0/(d_scalar*d_scalar*d_scalar*d_scalar));
 +    swF4             = _mm256_set1_pd(-30.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset+DIM,
 +                                                    &ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
 +
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +        fix3             = _mm256_setzero_pd();
 +        fiy3             = _mm256_setzero_pd();
 +        fiz3             = _mm256_setzero_pd();
 +
 +        /* Reset potential sums */
 +        velecsum         = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA+DIM,x+j_coord_offsetB+DIM,
 +                                                 x+j_coord_offsetC+DIM,x+j_coord_offsetD+DIM,
 +                                                 &jx1,&jy1,&jz1,&jx2,&jy2,&jz2,&jx3,&jy3,&jz3);
 +
 +            /* Calculate displacement vector */
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx13             = _mm256_sub_pd(ix1,jx3);
 +            dy13             = _mm256_sub_pd(iy1,jy3);
 +            dz13             = _mm256_sub_pd(iz1,jz3);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +            dx23             = _mm256_sub_pd(ix2,jx3);
 +            dy23             = _mm256_sub_pd(iy2,jy3);
 +            dz23             = _mm256_sub_pd(iz2,jz3);
 +            dx31             = _mm256_sub_pd(ix3,jx1);
 +            dy31             = _mm256_sub_pd(iy3,jy1);
 +            dz31             = _mm256_sub_pd(iz3,jz1);
 +            dx32             = _mm256_sub_pd(ix3,jx2);
 +            dy32             = _mm256_sub_pd(iy3,jy2);
 +            dz32             = _mm256_sub_pd(iz3,jz2);
 +            dx33             = _mm256_sub_pd(ix3,jx3);
 +            dy33             = _mm256_sub_pd(iy3,jy3);
 +            dz33             = _mm256_sub_pd(iz3,jz3);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq13            = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +            rsq23            = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
 +            rsq31            = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
 +            rsq32            = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
 +            rsq33            = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);
 +
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv13           = gmx_mm256_invsqrt_pd(rsq13);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +            rinv23           = gmx_mm256_invsqrt_pd(rsq23);
 +            rinv31           = gmx_mm256_invsqrt_pd(rsq31);
 +            rinv32           = gmx_mm256_invsqrt_pd(rsq32);
 +            rinv33           = gmx_mm256_invsqrt_pd(rsq33);
 +
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq13         = _mm256_mul_pd(rinv13,rinv13);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +            rinvsq23         = _mm256_mul_pd(rinv23,rinv23);
 +            rinvsq31         = _mm256_mul_pd(rinv31,rinv31);
 +            rinvsq32         = _mm256_mul_pd(rinv32,rinv32);
 +            rinvsq33         = _mm256_mul_pd(rinv33,rinv33);
 +
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +            fjx3             = _mm256_setzero_pd();
 +            fjy3             = _mm256_setzero_pd();
 +            fjz3             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq11,rcutoff2))
 +            {
 +
 +            r11              = _mm256_mul_pd(rsq11,rinv11);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r11,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq11,_mm256_sub_pd(rinv11,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq11,rinv11),_mm256_sub_pd(rinvsq11,felec));
 +
 +            d                = _mm256_sub_pd(r11,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv11,_mm256_mul_pd(velec,dsw)) );
 +            velec            = _mm256_mul_pd(velec,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq12,rcutoff2))
 +            {
 +
 +            r12              = _mm256_mul_pd(rsq12,rinv12);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r12,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq12,_mm256_sub_pd(rinv12,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq12,rinv12),_mm256_sub_pd(rinvsq12,felec));
 +
 +            d                = _mm256_sub_pd(r12,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv12,_mm256_mul_pd(velec,dsw)) );
 +            velec            = _mm256_mul_pd(velec,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq13,rcutoff2))
 +            {
 +
 +            r13              = _mm256_mul_pd(rsq13,rinv13);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r13,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq13,_mm256_sub_pd(rinv13,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq13,rinv13),_mm256_sub_pd(rinvsq13,felec));
 +
 +            d                = _mm256_sub_pd(r13,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv13,_mm256_mul_pd(velec,dsw)) );
 +            velec            = _mm256_mul_pd(velec,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq13,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx13);
 +            ty               = _mm256_mul_pd(fscal,dy13);
 +            tz               = _mm256_mul_pd(fscal,dz13);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq21,rcutoff2))
 +            {
 +
 +            r21              = _mm256_mul_pd(rsq21,rinv21);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r21,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq21,_mm256_sub_pd(rinv21,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq21,rinv21),_mm256_sub_pd(rinvsq21,felec));
 +
 +            d                = _mm256_sub_pd(r21,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv21,_mm256_mul_pd(velec,dsw)) );
 +            velec            = _mm256_mul_pd(velec,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq22,rcutoff2))
 +            {
 +
 +            r22              = _mm256_mul_pd(rsq22,rinv22);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r22,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq22,_mm256_sub_pd(rinv22,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq22,rinv22),_mm256_sub_pd(rinvsq22,felec));
 +
 +            d                = _mm256_sub_pd(r22,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv22,_mm256_mul_pd(velec,dsw)) );
 +            velec            = _mm256_mul_pd(velec,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq23,rcutoff2))
 +            {
 +
 +            r23              = _mm256_mul_pd(rsq23,rinv23);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r23,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq23,_mm256_sub_pd(rinv23,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq23,rinv23),_mm256_sub_pd(rinvsq23,felec));
 +
 +            d                = _mm256_sub_pd(r23,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv23,_mm256_mul_pd(velec,dsw)) );
 +            velec            = _mm256_mul_pd(velec,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq23,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx23);
 +            ty               = _mm256_mul_pd(fscal,dy23);
 +            tz               = _mm256_mul_pd(fscal,dz23);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq31,rcutoff2))
 +            {
 +
 +            r31              = _mm256_mul_pd(rsq31,rinv31);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r31,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq31,_mm256_sub_pd(rinv31,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq31,rinv31),_mm256_sub_pd(rinvsq31,felec));
 +
 +            d                = _mm256_sub_pd(r31,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv31,_mm256_mul_pd(velec,dsw)) );
 +            velec            = _mm256_mul_pd(velec,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq31,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx31);
 +            ty               = _mm256_mul_pd(fscal,dy31);
 +            tz               = _mm256_mul_pd(fscal,dz31);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq32,rcutoff2))
 +            {
 +
 +            r32              = _mm256_mul_pd(rsq32,rinv32);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r32,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq32,_mm256_sub_pd(rinv32,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq32,rinv32),_mm256_sub_pd(rinvsq32,felec));
 +
 +            d                = _mm256_sub_pd(r32,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv32,_mm256_mul_pd(velec,dsw)) );
 +            velec            = _mm256_mul_pd(velec,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq32,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx32);
 +            ty               = _mm256_mul_pd(fscal,dy32);
 +            tz               = _mm256_mul_pd(fscal,dz32);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq33,rcutoff2))
 +            {
 +
 +            r33              = _mm256_mul_pd(rsq33,rinv33);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r33,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq33,_mm256_sub_pd(rinv33,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq33,rinv33),_mm256_sub_pd(rinvsq33,felec));
 +
 +            d                = _mm256_sub_pd(r33,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv33,_mm256_mul_pd(velec,dsw)) );
 +            velec            = _mm256_mul_pd(velec,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq33,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx33);
 +            ty               = _mm256_mul_pd(fscal,dy33);
 +            tz               = _mm256_mul_pd(fscal,dz33);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            }
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA+DIM,fjptrB+DIM,fjptrC+DIM,fjptrD+DIM,
 +                                                      fjx1,fjy1,fjz1,fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
 +
 +            /* Inner loop uses 585 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA+DIM,x+j_coord_offsetB+DIM,
 +                                                 x+j_coord_offsetC+DIM,x+j_coord_offsetD+DIM,
 +                                                 &jx1,&jy1,&jz1,&jx2,&jy2,&jz2,&jx3,&jy3,&jz3);
 +
 +            /* Calculate displacement vector */
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx13             = _mm256_sub_pd(ix1,jx3);
 +            dy13             = _mm256_sub_pd(iy1,jy3);
 +            dz13             = _mm256_sub_pd(iz1,jz3);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +            dx23             = _mm256_sub_pd(ix2,jx3);
 +            dy23             = _mm256_sub_pd(iy2,jy3);
 +            dz23             = _mm256_sub_pd(iz2,jz3);
 +            dx31             = _mm256_sub_pd(ix3,jx1);
 +            dy31             = _mm256_sub_pd(iy3,jy1);
 +            dz31             = _mm256_sub_pd(iz3,jz1);
 +            dx32             = _mm256_sub_pd(ix3,jx2);
 +            dy32             = _mm256_sub_pd(iy3,jy2);
 +            dz32             = _mm256_sub_pd(iz3,jz2);
 +            dx33             = _mm256_sub_pd(ix3,jx3);
 +            dy33             = _mm256_sub_pd(iy3,jy3);
 +            dz33             = _mm256_sub_pd(iz3,jz3);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq13            = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +            rsq23            = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
 +            rsq31            = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
 +            rsq32            = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
 +            rsq33            = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);
 +
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv13           = gmx_mm256_invsqrt_pd(rsq13);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +            rinv23           = gmx_mm256_invsqrt_pd(rsq23);
 +            rinv31           = gmx_mm256_invsqrt_pd(rsq31);
 +            rinv32           = gmx_mm256_invsqrt_pd(rsq32);
 +            rinv33           = gmx_mm256_invsqrt_pd(rsq33);
 +
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq13         = _mm256_mul_pd(rinv13,rinv13);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +            rinvsq23         = _mm256_mul_pd(rinv23,rinv23);
 +            rinvsq31         = _mm256_mul_pd(rinv31,rinv31);
 +            rinvsq32         = _mm256_mul_pd(rinv32,rinv32);
 +            rinvsq33         = _mm256_mul_pd(rinv33,rinv33);
 +
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +            fjx3             = _mm256_setzero_pd();
 +            fjy3             = _mm256_setzero_pd();
 +            fjz3             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq11,rcutoff2))
 +            {
 +
 +            r11              = _mm256_mul_pd(rsq11,rinv11);
 +            r11              = _mm256_andnot_pd(dummy_mask,r11);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r11,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq11,_mm256_sub_pd(rinv11,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq11,rinv11),_mm256_sub_pd(rinvsq11,felec));
 +
 +            d                = _mm256_sub_pd(r11,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv11,_mm256_mul_pd(velec,dsw)) );
 +            velec            = _mm256_mul_pd(velec,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq12,rcutoff2))
 +            {
 +
 +            r12              = _mm256_mul_pd(rsq12,rinv12);
 +            r12              = _mm256_andnot_pd(dummy_mask,r12);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r12,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq12,_mm256_sub_pd(rinv12,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq12,rinv12),_mm256_sub_pd(rinvsq12,felec));
 +
 +            d                = _mm256_sub_pd(r12,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv12,_mm256_mul_pd(velec,dsw)) );
 +            velec            = _mm256_mul_pd(velec,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq13,rcutoff2))
 +            {
 +
 +            r13              = _mm256_mul_pd(rsq13,rinv13);
 +            r13              = _mm256_andnot_pd(dummy_mask,r13);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r13,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq13,_mm256_sub_pd(rinv13,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq13,rinv13),_mm256_sub_pd(rinvsq13,felec));
 +
 +            d                = _mm256_sub_pd(r13,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv13,_mm256_mul_pd(velec,dsw)) );
 +            velec            = _mm256_mul_pd(velec,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq13,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx13);
 +            ty               = _mm256_mul_pd(fscal,dy13);
 +            tz               = _mm256_mul_pd(fscal,dz13);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq21,rcutoff2))
 +            {
 +
 +            r21              = _mm256_mul_pd(rsq21,rinv21);
 +            r21              = _mm256_andnot_pd(dummy_mask,r21);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r21,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq21,_mm256_sub_pd(rinv21,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq21,rinv21),_mm256_sub_pd(rinvsq21,felec));
 +
 +            d                = _mm256_sub_pd(r21,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv21,_mm256_mul_pd(velec,dsw)) );
 +            velec            = _mm256_mul_pd(velec,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq22,rcutoff2))
 +            {
 +
 +            r22              = _mm256_mul_pd(rsq22,rinv22);
 +            r22              = _mm256_andnot_pd(dummy_mask,r22);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r22,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq22,_mm256_sub_pd(rinv22,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq22,rinv22),_mm256_sub_pd(rinvsq22,felec));
 +
 +            d                = _mm256_sub_pd(r22,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv22,_mm256_mul_pd(velec,dsw)) );
 +            velec            = _mm256_mul_pd(velec,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq23,rcutoff2))
 +            {
 +
 +            r23              = _mm256_mul_pd(rsq23,rinv23);
 +            r23              = _mm256_andnot_pd(dummy_mask,r23);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r23,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq23,_mm256_sub_pd(rinv23,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq23,rinv23),_mm256_sub_pd(rinvsq23,felec));
 +
 +            d                = _mm256_sub_pd(r23,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv23,_mm256_mul_pd(velec,dsw)) );
 +            velec            = _mm256_mul_pd(velec,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq23,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx23);
 +            ty               = _mm256_mul_pd(fscal,dy23);
 +            tz               = _mm256_mul_pd(fscal,dz23);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq31,rcutoff2))
 +            {
 +
 +            r31              = _mm256_mul_pd(rsq31,rinv31);
 +            r31              = _mm256_andnot_pd(dummy_mask,r31);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r31,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq31,_mm256_sub_pd(rinv31,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq31,rinv31),_mm256_sub_pd(rinvsq31,felec));
 +
 +            d                = _mm256_sub_pd(r31,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv31,_mm256_mul_pd(velec,dsw)) );
 +            velec            = _mm256_mul_pd(velec,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq31,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx31);
 +            ty               = _mm256_mul_pd(fscal,dy31);
 +            tz               = _mm256_mul_pd(fscal,dz31);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq32,rcutoff2))
 +            {
 +
 +            r32              = _mm256_mul_pd(rsq32,rinv32);
 +            r32              = _mm256_andnot_pd(dummy_mask,r32);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r32,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq32,_mm256_sub_pd(rinv32,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq32,rinv32),_mm256_sub_pd(rinvsq32,felec));
 +
 +            d                = _mm256_sub_pd(r32,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv32,_mm256_mul_pd(velec,dsw)) );
 +            velec            = _mm256_mul_pd(velec,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq32,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx32);
 +            ty               = _mm256_mul_pd(fscal,dy32);
 +            tz               = _mm256_mul_pd(fscal,dz32);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq33,rcutoff2))
 +            {
 +
 +            r33              = _mm256_mul_pd(rsq33,rinv33);
 +            r33              = _mm256_andnot_pd(dummy_mask,r33);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r33,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq33,_mm256_sub_pd(rinv33,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq33,rinv33),_mm256_sub_pd(rinvsq33,felec));
 +
 +            d                = _mm256_sub_pd(r33,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv33,_mm256_mul_pd(velec,dsw)) );
 +            velec            = _mm256_mul_pd(velec,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq33,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx33);
 +            ty               = _mm256_mul_pd(fscal,dy33);
 +            tz               = _mm256_mul_pd(fscal,dz33);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            }
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA+DIM,fjptrB+DIM,fjptrC+DIM,fjptrD+DIM,
 +                                                      fjx1,fjy1,fjz1,fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
 +
 +            /* Inner loop uses 594 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
 +                                                 f+i_coord_offset+DIM,fshift+i_shift_offset);
 +
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 19 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W4W4_VF,outeriter*19 + inneriter*594);
 +}
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecEwSw_VdwNone_GeomW4W4_F_avx_256_double
 + * Electrostatics interaction: Ewald
 + * VdW interaction:            None
 + * Geometry:                   Water4-Water4
 + * Calculate force/pot:        Force
 + */
 +void
 +nb_kernel_ElecEwSw_VdwNone_GeomW4W4_F_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    real *           vdwioffsetptr3;
 +    __m256d          ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
 +    int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
 +    __m256d          jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
 +    int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
 +    __m256d          jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
 +    int              vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D;
 +    __m256d          jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
 +    __m256d          dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
 +    __m256d          dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
 +    __m256d          dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
 +    __m256d          dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
 +    __m256d          dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
 +    __m256d          dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
 +    __m256d          dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
 +    __m256d          dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
 +    __m256d          dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    __m128i          ewitab;
 +    __m256d          ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
 +    __m256d          beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
 +    real             *ewtab;
 +    __m256d          rswitch,swV3,swV4,swV5,swF2,swF3,swF4,d,d2,sw,dsw;
 +    real             rswitch_scalar,d_scalar;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +
 +    sh_ewald         = _mm256_set1_pd(fr->ic->sh_ewald);
 +    beta             = _mm256_set1_pd(fr->ic->ewaldcoeff);
 +    beta2            = _mm256_mul_pd(beta,beta);
 +    beta3            = _mm256_mul_pd(beta,beta2);
 +
 +    ewtab            = fr->ic->tabq_coul_FDV0;
 +    ewtabscale       = _mm256_set1_pd(fr->ic->tabq_scale);
 +    ewtabhalfspace   = _mm256_set1_pd(0.5/fr->ic->tabq_scale);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    iq3              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+3]));
 +
 +    jq1              = _mm256_set1_pd(charge[inr+1]);
 +    jq2              = _mm256_set1_pd(charge[inr+2]);
 +    jq3              = _mm256_set1_pd(charge[inr+3]);
 +    qq11             = _mm256_mul_pd(iq1,jq1);
 +    qq12             = _mm256_mul_pd(iq1,jq2);
 +    qq13             = _mm256_mul_pd(iq1,jq3);
 +    qq21             = _mm256_mul_pd(iq2,jq1);
 +    qq22             = _mm256_mul_pd(iq2,jq2);
 +    qq23             = _mm256_mul_pd(iq2,jq3);
 +    qq31             = _mm256_mul_pd(iq3,jq1);
 +    qq32             = _mm256_mul_pd(iq3,jq2);
 +    qq33             = _mm256_mul_pd(iq3,jq3);
 +
 +    /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
 +    rcutoff_scalar   = fr->rcoulomb;
 +    rcutoff          = _mm256_set1_pd(rcutoff_scalar);
 +    rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);
 +
 +    rswitch_scalar   = fr->rcoulomb_switch;
 +    rswitch          = _mm256_set1_pd(rswitch_scalar);
 +    /* Setup switch parameters */
 +    d_scalar         = rcutoff_scalar-rswitch_scalar;
 +    d                = _mm256_set1_pd(d_scalar);
 +    swV3             = _mm256_set1_pd(-10.0/(d_scalar*d_scalar*d_scalar));
 +    swV4             = _mm256_set1_pd( 15.0/(d_scalar*d_scalar*d_scalar*d_scalar));
 +    swV5             = _mm256_set1_pd( -6.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
 +    swF2             = _mm256_set1_pd(-30.0/(d_scalar*d_scalar*d_scalar));
 +    swF3             = _mm256_set1_pd( 60.0/(d_scalar*d_scalar*d_scalar*d_scalar));
 +    swF4             = _mm256_set1_pd(-30.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset+DIM,
 +                                                    &ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
 +
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +        fix3             = _mm256_setzero_pd();
 +        fiy3             = _mm256_setzero_pd();
 +        fiz3             = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA+DIM,x+j_coord_offsetB+DIM,
 +                                                 x+j_coord_offsetC+DIM,x+j_coord_offsetD+DIM,
 +                                                 &jx1,&jy1,&jz1,&jx2,&jy2,&jz2,&jx3,&jy3,&jz3);
 +
 +            /* Calculate displacement vector */
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx13             = _mm256_sub_pd(ix1,jx3);
 +            dy13             = _mm256_sub_pd(iy1,jy3);
 +            dz13             = _mm256_sub_pd(iz1,jz3);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +            dx23             = _mm256_sub_pd(ix2,jx3);
 +            dy23             = _mm256_sub_pd(iy2,jy3);
 +            dz23             = _mm256_sub_pd(iz2,jz3);
 +            dx31             = _mm256_sub_pd(ix3,jx1);
 +            dy31             = _mm256_sub_pd(iy3,jy1);
 +            dz31             = _mm256_sub_pd(iz3,jz1);
 +            dx32             = _mm256_sub_pd(ix3,jx2);
 +            dy32             = _mm256_sub_pd(iy3,jy2);
 +            dz32             = _mm256_sub_pd(iz3,jz2);
 +            dx33             = _mm256_sub_pd(ix3,jx3);
 +            dy33             = _mm256_sub_pd(iy3,jy3);
 +            dz33             = _mm256_sub_pd(iz3,jz3);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq13            = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +            rsq23            = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
 +            rsq31            = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
 +            rsq32            = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
 +            rsq33            = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);
 +
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv13           = gmx_mm256_invsqrt_pd(rsq13);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +            rinv23           = gmx_mm256_invsqrt_pd(rsq23);
 +            rinv31           = gmx_mm256_invsqrt_pd(rsq31);
 +            rinv32           = gmx_mm256_invsqrt_pd(rsq32);
 +            rinv33           = gmx_mm256_invsqrt_pd(rsq33);
 +
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq13         = _mm256_mul_pd(rinv13,rinv13);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +            rinvsq23         = _mm256_mul_pd(rinv23,rinv23);
 +            rinvsq31         = _mm256_mul_pd(rinv31,rinv31);
 +            rinvsq32         = _mm256_mul_pd(rinv32,rinv32);
 +            rinvsq33         = _mm256_mul_pd(rinv33,rinv33);
 +
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +            fjx3             = _mm256_setzero_pd();
 +            fjy3             = _mm256_setzero_pd();
 +            fjz3             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq11,rcutoff2))
 +            {
 +
 +            r11              = _mm256_mul_pd(rsq11,rinv11);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r11,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq11,_mm256_sub_pd(rinv11,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq11,rinv11),_mm256_sub_pd(rinvsq11,felec));
 +
 +            d                = _mm256_sub_pd(r11,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv11,_mm256_mul_pd(velec,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq12,rcutoff2))
 +            {
 +
 +            r12              = _mm256_mul_pd(rsq12,rinv12);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r12,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq12,_mm256_sub_pd(rinv12,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq12,rinv12),_mm256_sub_pd(rinvsq12,felec));
 +
 +            d                = _mm256_sub_pd(r12,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv12,_mm256_mul_pd(velec,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq13,rcutoff2))
 +            {
 +
 +            r13              = _mm256_mul_pd(rsq13,rinv13);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r13,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq13,_mm256_sub_pd(rinv13,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq13,rinv13),_mm256_sub_pd(rinvsq13,felec));
 +
 +            d                = _mm256_sub_pd(r13,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv13,_mm256_mul_pd(velec,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq13,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx13);
 +            ty               = _mm256_mul_pd(fscal,dy13);
 +            tz               = _mm256_mul_pd(fscal,dz13);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq21,rcutoff2))
 +            {
 +
 +            r21              = _mm256_mul_pd(rsq21,rinv21);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r21,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq21,_mm256_sub_pd(rinv21,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq21,rinv21),_mm256_sub_pd(rinvsq21,felec));
 +
 +            d                = _mm256_sub_pd(r21,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv21,_mm256_mul_pd(velec,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq22,rcutoff2))
 +            {
 +
 +            r22              = _mm256_mul_pd(rsq22,rinv22);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r22,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq22,_mm256_sub_pd(rinv22,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq22,rinv22),_mm256_sub_pd(rinvsq22,felec));
 +
 +            d                = _mm256_sub_pd(r22,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv22,_mm256_mul_pd(velec,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq23,rcutoff2))
 +            {
 +
 +            r23              = _mm256_mul_pd(rsq23,rinv23);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r23,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq23,_mm256_sub_pd(rinv23,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq23,rinv23),_mm256_sub_pd(rinvsq23,felec));
 +
 +            d                = _mm256_sub_pd(r23,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv23,_mm256_mul_pd(velec,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq23,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx23);
 +            ty               = _mm256_mul_pd(fscal,dy23);
 +            tz               = _mm256_mul_pd(fscal,dz23);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq31,rcutoff2))
 +            {
 +
 +            r31              = _mm256_mul_pd(rsq31,rinv31);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r31,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq31,_mm256_sub_pd(rinv31,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq31,rinv31),_mm256_sub_pd(rinvsq31,felec));
 +
 +            d                = _mm256_sub_pd(r31,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv31,_mm256_mul_pd(velec,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq31,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx31);
 +            ty               = _mm256_mul_pd(fscal,dy31);
 +            tz               = _mm256_mul_pd(fscal,dz31);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq32,rcutoff2))
 +            {
 +
 +            r32              = _mm256_mul_pd(rsq32,rinv32);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r32,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq32,_mm256_sub_pd(rinv32,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq32,rinv32),_mm256_sub_pd(rinvsq32,felec));
 +
 +            d                = _mm256_sub_pd(r32,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv32,_mm256_mul_pd(velec,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq32,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx32);
 +            ty               = _mm256_mul_pd(fscal,dy32);
 +            tz               = _mm256_mul_pd(fscal,dz32);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq33,rcutoff2))
 +            {
 +
 +            r33              = _mm256_mul_pd(rsq33,rinv33);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r33,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq33,_mm256_sub_pd(rinv33,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq33,rinv33),_mm256_sub_pd(rinvsq33,felec));
 +
 +            d                = _mm256_sub_pd(r33,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv33,_mm256_mul_pd(velec,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq33,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx33);
 +            ty               = _mm256_mul_pd(fscal,dy33);
 +            tz               = _mm256_mul_pd(fscal,dz33);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            }
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA+DIM,fjptrB+DIM,fjptrC+DIM,fjptrD+DIM,
 +                                                      fjx1,fjy1,fjz1,fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
 +
 +            /* Inner loop uses 558 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA+DIM,x+j_coord_offsetB+DIM,
 +                                                 x+j_coord_offsetC+DIM,x+j_coord_offsetD+DIM,
 +                                                 &jx1,&jy1,&jz1,&jx2,&jy2,&jz2,&jx3,&jy3,&jz3);
 +
 +            /* Calculate displacement vector */
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx13             = _mm256_sub_pd(ix1,jx3);
 +            dy13             = _mm256_sub_pd(iy1,jy3);
 +            dz13             = _mm256_sub_pd(iz1,jz3);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +            dx23             = _mm256_sub_pd(ix2,jx3);
 +            dy23             = _mm256_sub_pd(iy2,jy3);
 +            dz23             = _mm256_sub_pd(iz2,jz3);
 +            dx31             = _mm256_sub_pd(ix3,jx1);
 +            dy31             = _mm256_sub_pd(iy3,jy1);
 +            dz31             = _mm256_sub_pd(iz3,jz1);
 +            dx32             = _mm256_sub_pd(ix3,jx2);
 +            dy32             = _mm256_sub_pd(iy3,jy2);
 +            dz32             = _mm256_sub_pd(iz3,jz2);
 +            dx33             = _mm256_sub_pd(ix3,jx3);
 +            dy33             = _mm256_sub_pd(iy3,jy3);
 +            dz33             = _mm256_sub_pd(iz3,jz3);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq13            = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +            rsq23            = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
 +            rsq31            = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
 +            rsq32            = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
 +            rsq33            = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);
 +
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv13           = gmx_mm256_invsqrt_pd(rsq13);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +            rinv23           = gmx_mm256_invsqrt_pd(rsq23);
 +            rinv31           = gmx_mm256_invsqrt_pd(rsq31);
 +            rinv32           = gmx_mm256_invsqrt_pd(rsq32);
 +            rinv33           = gmx_mm256_invsqrt_pd(rsq33);
 +
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq13         = _mm256_mul_pd(rinv13,rinv13);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +            rinvsq23         = _mm256_mul_pd(rinv23,rinv23);
 +            rinvsq31         = _mm256_mul_pd(rinv31,rinv31);
 +            rinvsq32         = _mm256_mul_pd(rinv32,rinv32);
 +            rinvsq33         = _mm256_mul_pd(rinv33,rinv33);
 +
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +            fjx3             = _mm256_setzero_pd();
 +            fjy3             = _mm256_setzero_pd();
 +            fjz3             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq11,rcutoff2))
 +            {
 +
 +            r11              = _mm256_mul_pd(rsq11,rinv11);
 +            r11              = _mm256_andnot_pd(dummy_mask,r11);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r11,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq11,_mm256_sub_pd(rinv11,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq11,rinv11),_mm256_sub_pd(rinvsq11,felec));
 +
 +            d                = _mm256_sub_pd(r11,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv11,_mm256_mul_pd(velec,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq12,rcutoff2))
 +            {
 +
 +            r12              = _mm256_mul_pd(rsq12,rinv12);
 +            r12              = _mm256_andnot_pd(dummy_mask,r12);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r12,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq12,_mm256_sub_pd(rinv12,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq12,rinv12),_mm256_sub_pd(rinvsq12,felec));
 +
 +            d                = _mm256_sub_pd(r12,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv12,_mm256_mul_pd(velec,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq13,rcutoff2))
 +            {
 +
 +            r13              = _mm256_mul_pd(rsq13,rinv13);
 +            r13              = _mm256_andnot_pd(dummy_mask,r13);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r13,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq13,_mm256_sub_pd(rinv13,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq13,rinv13),_mm256_sub_pd(rinvsq13,felec));
 +
 +            d                = _mm256_sub_pd(r13,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv13,_mm256_mul_pd(velec,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq13,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx13);
 +            ty               = _mm256_mul_pd(fscal,dy13);
 +            tz               = _mm256_mul_pd(fscal,dz13);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq21,rcutoff2))
 +            {
 +
 +            r21              = _mm256_mul_pd(rsq21,rinv21);
 +            r21              = _mm256_andnot_pd(dummy_mask,r21);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r21,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq21,_mm256_sub_pd(rinv21,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq21,rinv21),_mm256_sub_pd(rinvsq21,felec));
 +
 +            d                = _mm256_sub_pd(r21,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv21,_mm256_mul_pd(velec,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq22,rcutoff2))
 +            {
 +
 +            r22              = _mm256_mul_pd(rsq22,rinv22);
 +            r22              = _mm256_andnot_pd(dummy_mask,r22);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r22,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq22,_mm256_sub_pd(rinv22,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq22,rinv22),_mm256_sub_pd(rinvsq22,felec));
 +
 +            d                = _mm256_sub_pd(r22,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv22,_mm256_mul_pd(velec,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq23,rcutoff2))
 +            {
 +
 +            r23              = _mm256_mul_pd(rsq23,rinv23);
 +            r23              = _mm256_andnot_pd(dummy_mask,r23);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r23,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq23,_mm256_sub_pd(rinv23,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq23,rinv23),_mm256_sub_pd(rinvsq23,felec));
 +
 +            d                = _mm256_sub_pd(r23,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv23,_mm256_mul_pd(velec,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq23,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx23);
 +            ty               = _mm256_mul_pd(fscal,dy23);
 +            tz               = _mm256_mul_pd(fscal,dz23);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq31,rcutoff2))
 +            {
 +
 +            r31              = _mm256_mul_pd(rsq31,rinv31);
 +            r31              = _mm256_andnot_pd(dummy_mask,r31);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r31,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq31,_mm256_sub_pd(rinv31,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq31,rinv31),_mm256_sub_pd(rinvsq31,felec));
 +
 +            d                = _mm256_sub_pd(r31,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv31,_mm256_mul_pd(velec,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq31,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx31);
 +            ty               = _mm256_mul_pd(fscal,dy31);
 +            tz               = _mm256_mul_pd(fscal,dz31);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq32,rcutoff2))
 +            {
 +
 +            r32              = _mm256_mul_pd(rsq32,rinv32);
 +            r32              = _mm256_andnot_pd(dummy_mask,r32);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r32,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq32,_mm256_sub_pd(rinv32,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq32,rinv32),_mm256_sub_pd(rinvsq32,felec));
 +
 +            d                = _mm256_sub_pd(r32,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv32,_mm256_mul_pd(velec,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq32,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx32);
 +            ty               = _mm256_mul_pd(fscal,dy32);
 +            tz               = _mm256_mul_pd(fscal,dz32);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq33,rcutoff2))
 +            {
 +
 +            r33              = _mm256_mul_pd(rsq33,rinv33);
 +            r33              = _mm256_andnot_pd(dummy_mask,r33);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r33,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq33,_mm256_sub_pd(rinv33,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq33,rinv33),_mm256_sub_pd(rinvsq33,felec));
 +
 +            d                = _mm256_sub_pd(r33,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv33,_mm256_mul_pd(velec,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq33,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx33);
 +            ty               = _mm256_mul_pd(fscal,dy33);
 +            tz               = _mm256_mul_pd(fscal,dz33);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            }
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA+DIM,fjptrB+DIM,fjptrC+DIM,fjptrD+DIM,
 +                                                      fjx1,fjy1,fjz1,fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
 +
 +            /* Inner loop uses 567 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
 +                                                 f+i_coord_offset+DIM,fshift+i_shift_offset);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 18 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W4W4_F,outeriter*18 + inneriter*567);
 +}
index 107d6ee8df5e697403cf34fdbf947eaf13a82703,0000000000000000000000000000000000000000..cf98382304774bf2572e397a6191a3a3f1951ffd
mode 100644,000000..100644
--- /dev/null
@@@ -1,864 -1,0 +1,864 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/*
 + * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2001-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the Free
 + * Software Foundation; either version 2 of the License, or (at your option) any
 + * later version.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website.
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecEw_VdwCSTab_GeomP1P1_VF_avx_256_double
 + * Electrostatics interaction: Ewald
 + * VdW interaction:            CubicSplineTable
 + * Geometry:                   Particle-Particle
 + * Calculate force/pot:        PotentialAndForce
 + */
 +void
 +nb_kernel_ElecEw_VdwCSTab_GeomP1P1_VF_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m128i          vfitab;
 +    __m128i          ifour       = _mm_set1_epi32(4);
 +    __m256d          rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
 +    real             *vftab;
 +    __m128i          ewitab;
 +    __m256d          ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
 +    __m256d          beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
 +    real             *ewtab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    vftab            = kernel_data->table_vdw->data;
 +    vftabscale       = _mm256_set1_pd(kernel_data->table_vdw->scale);
 +
 +    sh_ewald         = _mm256_set1_pd(fr->ic->sh_ewald);
 +    beta             = _mm256_set1_pd(fr->ic->ewaldcoeff);
 +    beta2            = _mm256_mul_pd(beta,beta);
 +    beta3            = _mm256_mul_pd(beta,beta2);
 +
 +    ewtab            = fr->ic->tabq_coul_FDV0;
 +    ewtabscale       = _mm256_set1_pd(fr->ic->tabq_scale);
 +    ewtabhalfspace   = _mm256_set1_pd(0.5/fr->ic->tabq_scale);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_1rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +
 +        /* Load parameters for i particles */
 +        iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +        vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +        /* Reset potential sums */
 +        velecsum         = _mm256_setzero_pd();
 +        vvdwsum          = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,3);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r00,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(rinv00,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));
 +
 +            /* CUBIC SPLINE TABLE DISPERSION */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw6            = _mm256_mul_pd(c6_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw6            = _mm256_mul_pd(c6_00,FF);
 +
 +            /* CUBIC SPLINE TABLE REPULSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw12           = _mm256_mul_pd(c12_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw12           = _mm256_mul_pd(c12_00,FF);
 +            vvdw             = _mm256_add_pd(vvdw12,vvdw6);
 +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
 +
 +            /* Inner loop uses 75 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,3);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r00,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(rinv00,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));
 +
 +            /* CUBIC SPLINE TABLE DISPERSION */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw6            = _mm256_mul_pd(c6_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw6            = _mm256_mul_pd(c6_00,FF);
 +
 +            /* CUBIC SPLINE TABLE REPULSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw12           = _mm256_mul_pd(c12_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw12           = _mm256_mul_pd(c12_00,FF);
 +            vvdw             = _mm256_add_pd(vvdw12,vvdw6);
 +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +            vvdw             = _mm256_andnot_pd(dummy_mask,vvdw);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
 +
 +            /* Inner loop uses 76 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_1atom_swizzle_pd(fix0,fiy0,fiz0,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
 +        gmx_mm256_update_1pot_pd(vvdwsum,kernel_data->energygrp_vdw+ggid);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 9 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_VF,outeriter*9 + inneriter*76);
 +}
  +/*
  + * Gromacs nonbonded kernel:   nb_kernel_ElecEw_VdwCSTab_GeomP1P1_F_avx_256_double
  + * Electrostatics interaction: Ewald
  + * VdW interaction:            CubicSplineTable
  + * Geometry:                   Particle-Particle
  + * Calculate force/pot:        Force
  + *
  + * NOTE(review): presumably produced by the GROMACS avx_256_double kernel
  + * generator (the sibling file below carries that header) -- prefer fixing
  + * the generator rather than hand-editing this kernel; TODO confirm.
  + */
  +void
  +nb_kernel_ElecEw_VdwCSTab_GeomP1P1_F_avx_256_double
  +                    (t_nblist * gmx_restrict                nlist,
  +                     rvec * gmx_restrict                    xx,
  +                     rvec * gmx_restrict                    ff,
  +                     t_forcerec * gmx_restrict              fr,
  +                     t_mdatoms * gmx_restrict               mdatoms,
  +                     nb_kernel_data_t * gmx_restrict        kernel_data,
  +                     t_nrnb * gmx_restrict                  nrnb)
  +{
  +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
  +     * just 0 for non-waters.
  +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
  +     * jnr indices corresponding to data put in the four positions in the SIMD register.
  +     */
  +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
  +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
  +    int              jnrA,jnrB,jnrC,jnrD;
  +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
  +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
  +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
  +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
  +    real             rcutoff_scalar;
  +    real             *shiftvec,*fshift,*x,*f;
  +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
  +    /* Dump target for force updates of padded (dummy) j entries */
  +    real             scratch[4*DIM];
  +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
  +    real *           vdwioffsetptr0;
  +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
  +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
  +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
  +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
  +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
  +    real             *charge;
  +    int              nvdwtype;
  +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
  +    int              *vdwtype;
  +    real             *vdwparam;
  +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
  +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
  +    __m128i          vfitab;
  +    __m128i          ifour       = _mm_set1_epi32(4);
  +    __m256d          rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
  +    real             *vftab;
  +    __m128i          ewitab;
  +    __m256d          ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
  +    __m256d          beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
  +    real             *ewtab;
  +    __m256d          dummy_mask,cutoff_mask;
  +    __m128           tmpmask0,tmpmask1;
  +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
  +    __m256d          one     = _mm256_set1_pd(1.0);
  +    __m256d          two     = _mm256_set1_pd(2.0);
  +    x                = xx[0];
  +    f                = ff[0];
  +
  +    nri              = nlist->nri;
  +    iinr             = nlist->iinr;
  +    jindex           = nlist->jindex;
  +    jjnr             = nlist->jjnr;
  +    shiftidx         = nlist->shift;
  +    gid              = nlist->gid;
  +    shiftvec         = fr->shift_vec[0];
  +    fshift           = fr->fshift[0];
  +    facel            = _mm256_set1_pd(fr->epsfac);
  +    charge           = mdatoms->chargeA;
  +    nvdwtype         = fr->ntype;
  +    vdwparam         = fr->nbfp;
  +    vdwtype          = mdatoms->typeA;
  +
  +    vftab            = kernel_data->table_vdw->data;
  +    vftabscale       = _mm256_set1_pd(kernel_data->table_vdw->scale);
  +
  +    sh_ewald         = _mm256_set1_pd(fr->ic->sh_ewald);
  +    beta             = _mm256_set1_pd(fr->ic->ewaldcoeff);
  +    beta2            = _mm256_mul_pd(beta,beta);
  +    beta3            = _mm256_mul_pd(beta,beta2);
  +
  +    /* Force-only kernel: uses the force-only Ewald table (linearly
  +     * interpolated between ewtabF and ewtabFn below), not the packed
  +     * FDV0 layout used by the potential-and-force kernels.
  +     */
  +    ewtab            = fr->ic->tabq_coul_F;
  +    ewtabscale       = _mm256_set1_pd(fr->ic->tabq_scale);
  +    ewtabhalfspace   = _mm256_set1_pd(0.5/fr->ic->tabq_scale);
  +
  +    /* Avoid stupid compiler warnings */
  +    jnrA = jnrB = jnrC = jnrD = 0;
  +    j_coord_offsetA = 0;
  +    j_coord_offsetB = 0;
  +    j_coord_offsetC = 0;
  +    j_coord_offsetD = 0;
  +
  +    outeriter        = 0;
  +    inneriter        = 0;
  +
  +    for(iidx=0;iidx<4*DIM;iidx++)
  +    {
  +        scratch[iidx] = 0.0;
  +    }
  +
  +    /* Start outer loop over neighborlists */
  +    for(iidx=0; iidx<nri; iidx++)
  +    {
  +        /* Load shift vector for this list */
  +        i_shift_offset   = DIM*shiftidx[iidx];
  +
  +        /* Load limits for loop over neighbors */
  +        j_index_start    = jindex[iidx];
  +        j_index_end      = jindex[iidx+1];
  +
  +        /* Get outer coordinate index */
  +        inr              = iinr[iidx];
  +        i_coord_offset   = DIM*inr;
  +
  +        /* Load i particle coords and add shift vector */
  +        gmx_mm256_load_shift_and_1rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
  +
  +        fix0             = _mm256_setzero_pd();
  +        fiy0             = _mm256_setzero_pd();
  +        fiz0             = _mm256_setzero_pd();
  +
  +        /* Load parameters for i particles */
  +        iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
  +        vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
  +
  +        /* Start inner kernel loop */
  +        /* Main loop: four j atoms per iteration (one AVX register of
  +         * doubles); stops before the padded tail, where jjnr[] < 0.
  +         */
  +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
  +        {
  +
  +            /* Get j neighbor index, and coordinate index */
  +            jnrA             = jjnr[jidx];
  +            jnrB             = jjnr[jidx+1];
  +            jnrC             = jjnr[jidx+2];
  +            jnrD             = jjnr[jidx+3];
  +            j_coord_offsetA  = DIM*jnrA;
  +            j_coord_offsetB  = DIM*jnrB;
  +            j_coord_offsetC  = DIM*jnrC;
  +            j_coord_offsetD  = DIM*jnrD;
  +
  +            /* load j atom coordinates */
  +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
  +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
  +                                                 &jx0,&jy0,&jz0);
  +
  +            /* Calculate displacement vector */
  +            dx00             = _mm256_sub_pd(ix0,jx0);
  +            dy00             = _mm256_sub_pd(iy0,jy0);
  +            dz00             = _mm256_sub_pd(iz0,jz0);
  +
  +            /* Calculate squared distance and things based on it */
  +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
  +
  +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
  +
  +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
  +
  +            /* Load parameters for j particles */
  +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
  +                                                                 charge+jnrC+0,charge+jnrD+0);
  +            vdwjidx0A        = 2*vdwtype[jnrA+0];
  +            vdwjidx0B        = 2*vdwtype[jnrB+0];
  +            vdwjidx0C        = 2*vdwtype[jnrC+0];
  +            vdwjidx0D        = 2*vdwtype[jnrD+0];
  +
  +            /**************************
  +             * CALCULATE INTERACTIONS *
  +             **************************/
  +
  +            r00              = _mm256_mul_pd(rsq00,rinv00);
  +
  +            /* Compute parameters for interactions between i and j atoms */
  +            qq00             = _mm256_mul_pd(iq0,jq0);
  +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
  +                                            vdwioffsetptr0+vdwjidx0B,
  +                                            vdwioffsetptr0+vdwjidx0C,
  +                                            vdwioffsetptr0+vdwjidx0D,
  +                                            &c6_00,&c12_00);
  +
  +            /* Calculate table index by multiplying r with table scale and truncate to integer */
  +            rt               = _mm256_mul_pd(r00,vftabscale);
  +            vfitab           = _mm256_cvttpd_epi32(rt);
  +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
  +            /* <<3: 8 reals per table point (Y,F,G,H for dispersion plus
  +             * Y,F,G,H for repulsion, accessed at vfitab+ifour below).
  +             */
  +            vfitab           = _mm_slli_epi32(vfitab,3);
  +
  +            /* EWALD ELECTROSTATICS */
  +
  +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
  +            ewrt             = _mm256_mul_pd(r00,ewtabscale);
  +            ewitab           = _mm256_cvttpd_epi32(ewrt);
  +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
  +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
  +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
  +                                            &ewtabF,&ewtabFn);
  +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
  +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));
  +
  +            /* CUBIC SPLINE TABLE DISPERSION */
  +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
  +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
  +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
  +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
  +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
  +            Heps             = _mm256_mul_pd(vfeps,H);
  +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
  +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
  +            fvdw6            = _mm256_mul_pd(c6_00,FF);
  +
  +            /* CUBIC SPLINE TABLE REPULSION */
  +            vfitab           = _mm_add_epi32(vfitab,ifour);
  +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
  +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
  +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
  +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
  +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
  +            Heps             = _mm256_mul_pd(vfeps,H);
  +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
  +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
  +            fvdw12           = _mm256_mul_pd(c12_00,FF);
  +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
  +
  +            fscal            = _mm256_add_pd(felec,fvdw);
  +
  +            /* Calculate temporary vectorial force */
  +            tx               = _mm256_mul_pd(fscal,dx00);
  +            ty               = _mm256_mul_pd(fscal,dy00);
  +            tz               = _mm256_mul_pd(fscal,dz00);
  +
  +            /* Update vectorial force */
  +            fix0             = _mm256_add_pd(fix0,tx);
  +            fiy0             = _mm256_add_pd(fiy0,ty);
  +            fiz0             = _mm256_add_pd(fiz0,tz);
  +
  +            fjptrA             = f+j_coord_offsetA;
  +            fjptrB             = f+j_coord_offsetB;
  +            fjptrC             = f+j_coord_offsetC;
  +            fjptrD             = f+j_coord_offsetD;
  +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
  +
  +            /* Inner loop uses 62 flops */
  +        }
  +
  +        /* Epilogue: the final 1-3 real j atoms of this list.  Padded
  +         * entries have negative jnr; dummy_mask zeroes their forces and
  +         * their force updates are redirected into the scratch buffer.
  +         */
  +        if(jidx<j_index_end)
  +        {
  +
  +            /* Get j neighbor index, and coordinate index */
  +            jnrlistA         = jjnr[jidx];
  +            jnrlistB         = jjnr[jidx+1];
  +            jnrlistC         = jjnr[jidx+2];
  +            jnrlistD         = jjnr[jidx+3];
  +            /* Sign of each element will be negative for non-real atoms.
  +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
  +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
  +             */
  +            /* NOTE(review): '++' below marks the merge-resolved line: the
  +             * 128-bit integer compare is reinterpreted with a ps (float)
  +             * cast here, replacing the pd-cast variant removed in this merge.
  +             */
 ++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
  +
  +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
  +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
  +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
  +
  +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
  +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
  +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
  +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
  +            j_coord_offsetA  = DIM*jnrA;
  +            j_coord_offsetB  = DIM*jnrB;
  +            j_coord_offsetC  = DIM*jnrC;
  +            j_coord_offsetD  = DIM*jnrD;
  +
  +            /* load j atom coordinates */
  +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
  +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
  +                                                 &jx0,&jy0,&jz0);
  +
  +            /* Calculate displacement vector */
  +            dx00             = _mm256_sub_pd(ix0,jx0);
  +            dy00             = _mm256_sub_pd(iy0,jy0);
  +            dz00             = _mm256_sub_pd(iz0,jz0);
  +
  +            /* Calculate squared distance and things based on it */
  +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
  +
  +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
  +
  +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
  +
  +            /* Load parameters for j particles */
  +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
  +                                                                 charge+jnrC+0,charge+jnrD+0);
  +            vdwjidx0A        = 2*vdwtype[jnrA+0];
  +            vdwjidx0B        = 2*vdwtype[jnrB+0];
  +            vdwjidx0C        = 2*vdwtype[jnrC+0];
  +            vdwjidx0D        = 2*vdwtype[jnrD+0];
  +
  +            /**************************
  +             * CALCULATE INTERACTIONS *
  +             **************************/
  +
  +            r00              = _mm256_mul_pd(rsq00,rinv00);
  +            /* Zero r for dummy lanes so the table indices computed from it
  +             * stay at 0 and the loads below remain in range.
  +             */
  +            r00              = _mm256_andnot_pd(dummy_mask,r00);
  +
  +            /* Compute parameters for interactions between i and j atoms */
  +            qq00             = _mm256_mul_pd(iq0,jq0);
  +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
  +                                            vdwioffsetptr0+vdwjidx0B,
  +                                            vdwioffsetptr0+vdwjidx0C,
  +                                            vdwioffsetptr0+vdwjidx0D,
  +                                            &c6_00,&c12_00);
  +
  +            /* Calculate table index by multiplying r with table scale and truncate to integer */
  +            rt               = _mm256_mul_pd(r00,vftabscale);
  +            vfitab           = _mm256_cvttpd_epi32(rt);
  +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
  +            vfitab           = _mm_slli_epi32(vfitab,3);
  +
  +            /* EWALD ELECTROSTATICS */
  +
  +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
  +            ewrt             = _mm256_mul_pd(r00,ewtabscale);
  +            ewitab           = _mm256_cvttpd_epi32(ewrt);
  +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
  +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
  +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
  +                                            &ewtabF,&ewtabFn);
  +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
  +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));
  +
  +            /* CUBIC SPLINE TABLE DISPERSION */
  +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
  +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
  +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
  +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
  +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
  +            Heps             = _mm256_mul_pd(vfeps,H);
  +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
  +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
  +            fvdw6            = _mm256_mul_pd(c6_00,FF);
  +
  +            /* CUBIC SPLINE TABLE REPULSION */
  +            vfitab           = _mm_add_epi32(vfitab,ifour);
  +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
  +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
  +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
  +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
  +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
  +            Heps             = _mm256_mul_pd(vfeps,H);
  +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
  +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
  +            fvdw12           = _mm256_mul_pd(c12_00,FF);
  +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
  +
  +            fscal            = _mm256_add_pd(felec,fvdw);
  +
  +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
  +
  +            /* Calculate temporary vectorial force */
  +            tx               = _mm256_mul_pd(fscal,dx00);
  +            ty               = _mm256_mul_pd(fscal,dy00);
  +            tz               = _mm256_mul_pd(fscal,dz00);
  +
  +            /* Update vectorial force */
  +            fix0             = _mm256_add_pd(fix0,tx);
  +            fiy0             = _mm256_add_pd(fiy0,ty);
  +            fiz0             = _mm256_add_pd(fiz0,tz);
  +
  +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
  +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
  +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
  +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
  +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
  +
  +            /* Inner loop uses 63 flops */
  +        }
  +
  +        /* End of innermost loop */
  +
  +        gmx_mm256_update_iforce_1atom_swizzle_pd(fix0,fiy0,fiz0,
  +                                                 f+i_coord_offset,fshift+i_shift_offset);
  +
  +        /* Increment number of inner iterations */
  +        inneriter                  += j_index_end - j_index_start;
  +
  +        /* Outer loop uses 7 flops */
  +    }
  +
  +    /* Increment number of outer iterations */
  +    outeriter        += nri;
  +
  +    /* Update outer/inner flops */
  +
  +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_F,outeriter*7 + inneriter*63);
  +}
index d2f97f0066b7bfe9704edbc67cb0a2d847a3202a,0000000000000000000000000000000000000000..f49f54923a25ff9a3813f9b6a07a620e9d5c7f6b
mode 100644,000000..100644
--- /dev/null
@@@ -1,1322 -1,0 +1,1322 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/*
 + * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2001-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the Free
 + * Software Foundation; either version 2 of the License, or (at your option) any
 + * later version.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website.
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecEw_VdwCSTab_GeomW3P1_VF_avx_256_double
 + * Electrostatics interaction: Ewald
 + * VdW interaction:            CubicSplineTable
 + * Geometry:                   Water3-Particle
 + * Calculate force/pot:        PotentialAndForce
 + */
 +void
 +nb_kernel_ElecEw_VdwCSTab_GeomW3P1_VF_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m128i          vfitab;
 +    __m128i          ifour       = _mm_set1_epi32(4);
 +    __m256d          rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
 +    real             *vftab;
 +    __m128i          ewitab;
 +    __m256d          ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
 +    __m256d          beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
 +    real             *ewtab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    vftab            = kernel_data->table_vdw->data;
 +    vftabscale       = _mm256_set1_pd(kernel_data->table_vdw->scale);
 +
 +    sh_ewald         = _mm256_set1_pd(fr->ic->sh_ewald);
 +    beta             = _mm256_set1_pd(fr->ic->ewaldcoeff);
 +    beta2            = _mm256_mul_pd(beta,beta);
 +    beta3            = _mm256_mul_pd(beta,beta2);
 +
 +    ewtab            = fr->ic->tabq_coul_FDV0;
 +    ewtabscale       = _mm256_set1_pd(fr->ic->tabq_scale);
 +    ewtabhalfspace   = _mm256_set1_pd(0.5/fr->ic->tabq_scale);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +
 +        /* Reset potential sums */
 +        velecsum         = _mm256_setzero_pd();
 +        vvdwsum          = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,3);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r00,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(rinv00,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));
 +
 +            /* CUBIC SPLINE TABLE DISPERSION */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw6            = _mm256_mul_pd(c6_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw6            = _mm256_mul_pd(c6_00,FF);
 +
 +            /* CUBIC SPLINE TABLE REPULSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw12           = _mm256_mul_pd(c12_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw12           = _mm256_mul_pd(c12_00,FF);
 +            vvdw             = _mm256_add_pd(vvdw12,vvdw6);
 +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r10,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(rinv10,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r20,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(rinv20,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 160 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,3);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r00,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(rinv00,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));
 +
 +            /* CUBIC SPLINE TABLE DISPERSION */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw6            = _mm256_mul_pd(c6_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw6            = _mm256_mul_pd(c6_00,FF);
 +
 +            /* CUBIC SPLINE TABLE REPULSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw12           = _mm256_mul_pd(c12_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw12           = _mm256_mul_pd(c12_00,FF);
 +            vvdw             = _mm256_add_pd(vvdw12,vvdw6);
 +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +            vvdw             = _mm256_andnot_pd(dummy_mask,vvdw);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +            r10              = _mm256_andnot_pd(dummy_mask,r10);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r10,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(rinv10,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +            r20              = _mm256_andnot_pd(dummy_mask,r20);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r20,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(rinv20,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 163 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
 +        gmx_mm256_update_1pot_pd(vvdwsum,kernel_data->energygrp_vdw+ggid);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 20 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3_VF,outeriter*20 + inneriter*163);
 +}
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecEw_VdwCSTab_GeomW3P1_F_avx_256_double
 + * Electrostatics interaction: Ewald
 + * VdW interaction:            CubicSplineTable
 + * Geometry:                   Water3-Particle
 + * Calculate force/pot:        Force
 + */
 +void
 +nb_kernel_ElecEw_VdwCSTab_GeomW3P1_F_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m128i          vfitab;
 +    __m128i          ifour       = _mm_set1_epi32(4);
 +    __m256d          rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
 +    real             *vftab;
 +    __m128i          ewitab;
 +    __m256d          ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
 +    __m256d          beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
 +    real             *ewtab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    vftab            = kernel_data->table_vdw->data;
 +    vftabscale       = _mm256_set1_pd(kernel_data->table_vdw->scale);
 +
 +    sh_ewald         = _mm256_set1_pd(fr->ic->sh_ewald);
 +    beta             = _mm256_set1_pd(fr->ic->ewaldcoeff);
 +    beta2            = _mm256_mul_pd(beta,beta);
 +    beta3            = _mm256_mul_pd(beta,beta2);
 +
 +    ewtab            = fr->ic->tabq_coul_F;
 +    ewtabscale       = _mm256_set1_pd(fr->ic->tabq_scale);
 +    ewtabhalfspace   = _mm256_set1_pd(0.5/fr->ic->tabq_scale);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,3);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r00,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));
 +
 +            /* CUBIC SPLINE TABLE DISPERSION */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw6            = _mm256_mul_pd(c6_00,FF);
 +
 +            /* CUBIC SPLINE TABLE REPULSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw12           = _mm256_mul_pd(c12_00,FF);
 +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r10,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r20,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 137 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,3);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r00,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));
 +
 +            /* CUBIC SPLINE TABLE DISPERSION */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw6            = _mm256_mul_pd(c6_00,FF);
 +
 +            /* CUBIC SPLINE TABLE REPULSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw12           = _mm256_mul_pd(c12_00,FF);
 +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +            r10              = _mm256_andnot_pd(dummy_mask,r10);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r10,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +            r20              = _mm256_andnot_pd(dummy_mask,r20);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r20,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 140 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 18 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3_F,outeriter*18 + inneriter*140);
 +}
index 7f94be251d32943b006ba4b0dd6fa682032696be,0000000000000000000000000000000000000000..1e69cc9f835eed5f3bf76676dfa42af4e235c56d
mode 100644,000000..100644
--- /dev/null
@@@ -1,2412 -1,0 +1,2412 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/*
 + * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2001-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the Free
 + * Software Foundation; either version 2 of the License, or (at your option) any
 + * later version.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website.
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecEw_VdwCSTab_GeomW3W3_VF_avx_256_double
 + * Electrostatics interaction: Ewald
 + * VdW interaction:            CubicSplineTable
 + * Geometry:                   Water3-Water3
 + * Calculate force/pot:        PotentialAndForce
 + */
 +void
 +nb_kernel_ElecEw_VdwCSTab_GeomW3W3_VF_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
 +    __m256d          jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
 +    int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
 +    __m256d          jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
 +    __m256d          dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
 +    __m256d          dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
 +    __m256d          dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m128i          vfitab;
 +    __m128i          ifour       = _mm_set1_epi32(4);
 +    __m256d          rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
 +    real             *vftab;
 +    __m128i          ewitab;
 +    __m256d          ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
 +    __m256d          beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
 +    real             *ewtab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    vftab            = kernel_data->table_vdw->data;
 +    vftabscale       = _mm256_set1_pd(kernel_data->table_vdw->scale);
 +
 +    sh_ewald         = _mm256_set1_pd(fr->ic->sh_ewald);
 +    beta             = _mm256_set1_pd(fr->ic->ewaldcoeff);
 +    beta2            = _mm256_mul_pd(beta,beta);
 +    beta3            = _mm256_mul_pd(beta,beta2);
 +
 +    ewtab            = fr->ic->tabq_coul_FDV0;
 +    ewtabscale       = _mm256_set1_pd(fr->ic->tabq_scale);
 +    ewtabhalfspace   = _mm256_set1_pd(0.5/fr->ic->tabq_scale);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +    jq0              = _mm256_set1_pd(charge[inr+0]);
 +    jq1              = _mm256_set1_pd(charge[inr+1]);
 +    jq2              = _mm256_set1_pd(charge[inr+2]);
 +    vdwjidx0A        = 2*vdwtype[inr+0];
 +    qq00             = _mm256_mul_pd(iq0,jq0);
 +    c6_00            = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A]);
 +    c12_00           = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A+1]);
 +    qq01             = _mm256_mul_pd(iq0,jq1);
 +    qq02             = _mm256_mul_pd(iq0,jq2);
 +    qq10             = _mm256_mul_pd(iq1,jq0);
 +    qq11             = _mm256_mul_pd(iq1,jq1);
 +    qq12             = _mm256_mul_pd(iq1,jq2);
 +    qq20             = _mm256_mul_pd(iq2,jq0);
 +    qq21             = _mm256_mul_pd(iq2,jq1);
 +    qq22             = _mm256_mul_pd(iq2,jq2);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +
 +        /* Reset potential sums */
 +        velecsum         = _mm256_setzero_pd();
 +        vvdwsum          = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                              &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx01             = _mm256_sub_pd(ix0,jx1);
 +            dy01             = _mm256_sub_pd(iy0,jy1);
 +            dz01             = _mm256_sub_pd(iz0,jz1);
 +            dx02             = _mm256_sub_pd(ix0,jx2);
 +            dy02             = _mm256_sub_pd(iy0,jy2);
 +            dz02             = _mm256_sub_pd(iz0,jz2);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
 +            rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv01           = gmx_mm256_invsqrt_pd(rsq01);
 +            rinv02           = gmx_mm256_invsqrt_pd(rsq02);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq01         = _mm256_mul_pd(rinv01,rinv01);
 +            rinvsq02         = _mm256_mul_pd(rinv02,rinv02);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,3);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r00,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(rinv00,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));
 +
 +            /* CUBIC SPLINE TABLE DISPERSION */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw6            = _mm256_mul_pd(c6_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw6            = _mm256_mul_pd(c6_00,FF);
 +
 +            /* CUBIC SPLINE TABLE REPULSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw12           = _mm256_mul_pd(c12_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw12           = _mm256_mul_pd(c12_00,FF);
 +            vvdw             = _mm256_add_pd(vvdw12,vvdw6);
 +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r01              = _mm256_mul_pd(rsq01,rinv01);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r01,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq01,_mm256_sub_pd(rinv01,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq01,rinv01),_mm256_sub_pd(rinvsq01,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx01);
 +            ty               = _mm256_mul_pd(fscal,dy01);
 +            tz               = _mm256_mul_pd(fscal,dz01);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r02              = _mm256_mul_pd(rsq02,rinv02);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r02,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq02,_mm256_sub_pd(rinv02,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq02,rinv02),_mm256_sub_pd(rinvsq02,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx02);
 +            ty               = _mm256_mul_pd(fscal,dy02);
 +            tz               = _mm256_mul_pd(fscal,dz02);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r10,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(rinv10,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r11              = _mm256_mul_pd(rsq11,rinv11);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r11,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq11,_mm256_sub_pd(rinv11,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq11,rinv11),_mm256_sub_pd(rinvsq11,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r12              = _mm256_mul_pd(rsq12,rinv12);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r12,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq12,_mm256_sub_pd(rinv12,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq12,rinv12),_mm256_sub_pd(rinvsq12,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r20,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(rinv20,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r21              = _mm256_mul_pd(rsq21,rinv21);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r21,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq21,_mm256_sub_pd(rinv21,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq21,rinv21),_mm256_sub_pd(rinvsq21,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r22              = _mm256_mul_pd(rsq22,rinv22);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r22,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq22,_mm256_sub_pd(rinv22,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq22,rinv22),_mm256_sub_pd(rinvsq22,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
 +
 +            /* Inner loop uses 403 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                              &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx01             = _mm256_sub_pd(ix0,jx1);
 +            dy01             = _mm256_sub_pd(iy0,jy1);
 +            dz01             = _mm256_sub_pd(iz0,jz1);
 +            dx02             = _mm256_sub_pd(ix0,jx2);
 +            dy02             = _mm256_sub_pd(iy0,jy2);
 +            dz02             = _mm256_sub_pd(iz0,jz2);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
 +            rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv01           = gmx_mm256_invsqrt_pd(rsq01);
 +            rinv02           = gmx_mm256_invsqrt_pd(rsq02);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq01         = _mm256_mul_pd(rinv01,rinv01);
 +            rinvsq02         = _mm256_mul_pd(rinv02,rinv02);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,3);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r00,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(rinv00,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));
 +
 +            /* CUBIC SPLINE TABLE DISPERSION */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw6            = _mm256_mul_pd(c6_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw6            = _mm256_mul_pd(c6_00,FF);
 +
 +            /* CUBIC SPLINE TABLE REPULSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw12           = _mm256_mul_pd(c12_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw12           = _mm256_mul_pd(c12_00,FF);
 +            vvdw             = _mm256_add_pd(vvdw12,vvdw6);
 +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +            vvdw             = _mm256_andnot_pd(dummy_mask,vvdw);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r01              = _mm256_mul_pd(rsq01,rinv01);
 +            r01              = _mm256_andnot_pd(dummy_mask,r01);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r01,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq01,_mm256_sub_pd(rinv01,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq01,rinv01),_mm256_sub_pd(rinvsq01,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx01);
 +            ty               = _mm256_mul_pd(fscal,dy01);
 +            tz               = _mm256_mul_pd(fscal,dz01);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r02              = _mm256_mul_pd(rsq02,rinv02);
 +            r02              = _mm256_andnot_pd(dummy_mask,r02);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r02,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq02,_mm256_sub_pd(rinv02,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq02,rinv02),_mm256_sub_pd(rinvsq02,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx02);
 +            ty               = _mm256_mul_pd(fscal,dy02);
 +            tz               = _mm256_mul_pd(fscal,dz02);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +            r10              = _mm256_andnot_pd(dummy_mask,r10);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r10,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(rinv10,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r11              = _mm256_mul_pd(rsq11,rinv11);
 +            r11              = _mm256_andnot_pd(dummy_mask,r11);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r11,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq11,_mm256_sub_pd(rinv11,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq11,rinv11),_mm256_sub_pd(rinvsq11,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r12              = _mm256_mul_pd(rsq12,rinv12);
 +            r12              = _mm256_andnot_pd(dummy_mask,r12);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r12,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq12,_mm256_sub_pd(rinv12,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq12,rinv12),_mm256_sub_pd(rinvsq12,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +            r20              = _mm256_andnot_pd(dummy_mask,r20);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r20,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(rinv20,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r21              = _mm256_mul_pd(rsq21,rinv21);
 +            r21              = _mm256_andnot_pd(dummy_mask,r21);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r21,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq21,_mm256_sub_pd(rinv21,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq21,rinv21),_mm256_sub_pd(rinvsq21,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r22              = _mm256_mul_pd(rsq22,rinv22);
 +            r22              = _mm256_andnot_pd(dummy_mask,r22);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r22,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq22,_mm256_sub_pd(rinv22,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq22,rinv22),_mm256_sub_pd(rinvsq22,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
 +
 +            /* Inner loop uses 412 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
 +        gmx_mm256_update_1pot_pd(vvdwsum,kernel_data->energygrp_vdw+ggid);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 20 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3W3_VF,outeriter*20 + inneriter*412);
 +}
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecEw_VdwCSTab_GeomW3W3_F_avx_256_double
 + * Electrostatics interaction: Ewald
 + * VdW interaction:            CubicSplineTable
 + * Geometry:                   Water3-Water3
 + * Calculate force/pot:        Force
 + */
 +void
 +nb_kernel_ElecEw_VdwCSTab_GeomW3W3_F_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
 +    __m256d          jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
 +    int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
 +    __m256d          jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
 +    __m256d          dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
 +    __m256d          dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
 +    __m256d          dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m128i          vfitab;
 +    __m128i          ifour       = _mm_set1_epi32(4);
 +    __m256d          rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
 +    real             *vftab;
 +    __m128i          ewitab;
 +    __m256d          ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
 +    __m256d          beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
 +    real             *ewtab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    vftab            = kernel_data->table_vdw->data;
 +    vftabscale       = _mm256_set1_pd(kernel_data->table_vdw->scale);
 +
 +    sh_ewald         = _mm256_set1_pd(fr->ic->sh_ewald);
 +    beta             = _mm256_set1_pd(fr->ic->ewaldcoeff);
 +    beta2            = _mm256_mul_pd(beta,beta);
 +    beta3            = _mm256_mul_pd(beta,beta2);
 +
 +    ewtab            = fr->ic->tabq_coul_F;
 +    ewtabscale       = _mm256_set1_pd(fr->ic->tabq_scale);
 +    ewtabhalfspace   = _mm256_set1_pd(0.5/fr->ic->tabq_scale);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +    jq0              = _mm256_set1_pd(charge[inr+0]);
 +    jq1              = _mm256_set1_pd(charge[inr+1]);
 +    jq2              = _mm256_set1_pd(charge[inr+2]);
 +    vdwjidx0A        = 2*vdwtype[inr+0];
 +    qq00             = _mm256_mul_pd(iq0,jq0);
 +    c6_00            = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A]);
 +    c12_00           = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A+1]);
 +    qq01             = _mm256_mul_pd(iq0,jq1);
 +    qq02             = _mm256_mul_pd(iq0,jq2);
 +    qq10             = _mm256_mul_pd(iq1,jq0);
 +    qq11             = _mm256_mul_pd(iq1,jq1);
 +    qq12             = _mm256_mul_pd(iq1,jq2);
 +    qq20             = _mm256_mul_pd(iq2,jq0);
 +    qq21             = _mm256_mul_pd(iq2,jq1);
 +    qq22             = _mm256_mul_pd(iq2,jq2);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                              &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx01             = _mm256_sub_pd(ix0,jx1);
 +            dy01             = _mm256_sub_pd(iy0,jy1);
 +            dz01             = _mm256_sub_pd(iz0,jz1);
 +            dx02             = _mm256_sub_pd(ix0,jx2);
 +            dy02             = _mm256_sub_pd(iy0,jy2);
 +            dz02             = _mm256_sub_pd(iz0,jz2);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
 +            rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv01           = gmx_mm256_invsqrt_pd(rsq01);
 +            rinv02           = gmx_mm256_invsqrt_pd(rsq02);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq01         = _mm256_mul_pd(rinv01,rinv01);
 +            rinvsq02         = _mm256_mul_pd(rinv02,rinv02);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,3);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r00,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));
 +
 +            /* CUBIC SPLINE TABLE DISPERSION */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw6            = _mm256_mul_pd(c6_00,FF);
 +
 +            /* CUBIC SPLINE TABLE REPULSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw12           = _mm256_mul_pd(c12_00,FF);
 +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r01              = _mm256_mul_pd(rsq01,rinv01);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r01,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq01,rinv01),_mm256_sub_pd(rinvsq01,felec));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx01);
 +            ty               = _mm256_mul_pd(fscal,dy01);
 +            tz               = _mm256_mul_pd(fscal,dz01);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r02              = _mm256_mul_pd(rsq02,rinv02);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r02,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq02,rinv02),_mm256_sub_pd(rinvsq02,felec));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx02);
 +            ty               = _mm256_mul_pd(fscal,dy02);
 +            tz               = _mm256_mul_pd(fscal,dz02);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r10,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r11              = _mm256_mul_pd(rsq11,rinv11);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r11,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq11,rinv11),_mm256_sub_pd(rinvsq11,felec));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r12              = _mm256_mul_pd(rsq12,rinv12);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r12,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq12,rinv12),_mm256_sub_pd(rinvsq12,felec));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r20,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r21              = _mm256_mul_pd(rsq21,rinv21);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r21,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq21,rinv21),_mm256_sub_pd(rinvsq21,felec));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r22              = _mm256_mul_pd(rsq22,rinv22);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r22,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq22,rinv22),_mm256_sub_pd(rinvsq22,felec));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
 +
 +            /* Inner loop uses 350 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                              &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx01             = _mm256_sub_pd(ix0,jx1);
 +            dy01             = _mm256_sub_pd(iy0,jy1);
 +            dz01             = _mm256_sub_pd(iz0,jz1);
 +            dx02             = _mm256_sub_pd(ix0,jx2);
 +            dy02             = _mm256_sub_pd(iy0,jy2);
 +            dz02             = _mm256_sub_pd(iz0,jz2);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
 +            rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv01           = gmx_mm256_invsqrt_pd(rsq01);
 +            rinv02           = gmx_mm256_invsqrt_pd(rsq02);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq01         = _mm256_mul_pd(rinv01,rinv01);
 +            rinvsq02         = _mm256_mul_pd(rinv02,rinv02);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,3);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r00,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));
 +
 +            /* CUBIC SPLINE TABLE DISPERSION */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw6            = _mm256_mul_pd(c6_00,FF);
 +
 +            /* CUBIC SPLINE TABLE REPULSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw12           = _mm256_mul_pd(c12_00,FF);
 +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r01              = _mm256_mul_pd(rsq01,rinv01);
 +            r01              = _mm256_andnot_pd(dummy_mask,r01);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r01,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq01,rinv01),_mm256_sub_pd(rinvsq01,felec));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx01);
 +            ty               = _mm256_mul_pd(fscal,dy01);
 +            tz               = _mm256_mul_pd(fscal,dz01);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r02              = _mm256_mul_pd(rsq02,rinv02);
 +            r02              = _mm256_andnot_pd(dummy_mask,r02);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r02,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq02,rinv02),_mm256_sub_pd(rinvsq02,felec));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx02);
 +            ty               = _mm256_mul_pd(fscal,dy02);
 +            tz               = _mm256_mul_pd(fscal,dz02);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +            r10              = _mm256_andnot_pd(dummy_mask,r10);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r10,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r11              = _mm256_mul_pd(rsq11,rinv11);
 +            r11              = _mm256_andnot_pd(dummy_mask,r11);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r11,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq11,rinv11),_mm256_sub_pd(rinvsq11,felec));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r12              = _mm256_mul_pd(rsq12,rinv12);
 +            r12              = _mm256_andnot_pd(dummy_mask,r12);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r12,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq12,rinv12),_mm256_sub_pd(rinvsq12,felec));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +            r20              = _mm256_andnot_pd(dummy_mask,r20);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r20,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r21              = _mm256_mul_pd(rsq21,rinv21);
 +            r21              = _mm256_andnot_pd(dummy_mask,r21);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r21,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq21,rinv21),_mm256_sub_pd(rinvsq21,felec));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r22              = _mm256_mul_pd(rsq22,rinv22);
 +            r22              = _mm256_andnot_pd(dummy_mask,r22);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r22,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq22,rinv22),_mm256_sub_pd(rinvsq22,felec));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
 +
 +            /* Inner loop uses 359 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 18 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3W3_F,outeriter*18 + inneriter*359);
 +}
index 3c1888a21ceb313dc04f67c30c6bedd9890b8748,0000000000000000000000000000000000000000..e0a99dcd7c98854546ccf4bc1adc8ce8eafcd36b
mode 100644,000000..100644
--- /dev/null
@@@ -1,1460 -1,0 +1,1460 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/*
 + * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2001-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the Free
 + * Software Foundation; either version 2 of the License, or (at your option) any
 + * later version.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website.
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecEw_VdwCSTab_GeomW4P1_VF_avx_256_double
 + * Electrostatics interaction: Ewald
 + * VdW interaction:            CubicSplineTable
 + * Geometry:                   Water4-Particle
 + * Calculate force/pot:        PotentialAndForce
 + */
 +void
 +nb_kernel_ElecEw_VdwCSTab_GeomW4P1_VF_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    real *           vdwioffsetptr3;
 +    __m256d          ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          dx30,dy30,dz30,rsq30,rinv30,rinvsq30,r30,qq30,c6_30,c12_30;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m128i          vfitab;
 +    __m128i          ifour       = _mm_set1_epi32(4);
 +    __m256d          rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
 +    real             *vftab;
 +    __m128i          ewitab;
 +    __m256d          ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
 +    __m256d          beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
 +    real             *ewtab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    vftab            = kernel_data->table_vdw->data;
 +    vftabscale       = _mm256_set1_pd(kernel_data->table_vdw->scale);
 +
 +    sh_ewald         = _mm256_set1_pd(fr->ic->sh_ewald);
 +    beta             = _mm256_set1_pd(fr->ic->ewaldcoeff);
 +    beta2            = _mm256_mul_pd(beta,beta);
 +    beta3            = _mm256_mul_pd(beta,beta2);
 +
 +    ewtab            = fr->ic->tabq_coul_FDV0;
 +    ewtabscale       = _mm256_set1_pd(fr->ic->tabq_scale);
 +    ewtabhalfspace   = _mm256_set1_pd(0.5/fr->ic->tabq_scale);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    iq3              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+3]));
 +    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_4rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +        fix3             = _mm256_setzero_pd();
 +        fiy3             = _mm256_setzero_pd();
 +        fiz3             = _mm256_setzero_pd();
 +
 +        /* Reset potential sums */
 +        velecsum         = _mm256_setzero_pd();
 +        vvdwsum          = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx30             = _mm256_sub_pd(ix3,jx0);
 +            dy30             = _mm256_sub_pd(iy3,jy0);
 +            dz30             = _mm256_sub_pd(iz3,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq30            = gmx_mm256_calc_rsq_pd(dx30,dy30,dz30);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv30           = gmx_mm256_invsqrt_pd(rsq30);
 +
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq30         = _mm256_mul_pd(rinv30,rinv30);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,3);
 +
 +            /* CUBIC SPLINE TABLE DISPERSION */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw6            = _mm256_mul_pd(c6_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw6            = _mm256_mul_pd(c6_00,FF);
 +
 +            /* CUBIC SPLINE TABLE REPULSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw12           = _mm256_mul_pd(c12_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw12           = _mm256_mul_pd(c12_00,FF);
 +            vvdw             = _mm256_add_pd(vvdw12,vvdw6);
 +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = fvdw;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r10,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(rinv10,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r20,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(rinv20,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r30              = _mm256_mul_pd(rsq30,rinv30);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq30             = _mm256_mul_pd(iq3,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r30,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq30,_mm256_sub_pd(rinv30,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq30,rinv30),_mm256_sub_pd(rinvsq30,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx30);
 +            ty               = _mm256_mul_pd(fscal,dy30);
 +            tz               = _mm256_mul_pd(fscal,dz30);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 182 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx30             = _mm256_sub_pd(ix3,jx0);
 +            dy30             = _mm256_sub_pd(iy3,jy0);
 +            dz30             = _mm256_sub_pd(iz3,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq30            = gmx_mm256_calc_rsq_pd(dx30,dy30,dz30);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv30           = gmx_mm256_invsqrt_pd(rsq30);
 +
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq30         = _mm256_mul_pd(rinv30,rinv30);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,3);
 +
 +            /* CUBIC SPLINE TABLE DISPERSION */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw6            = _mm256_mul_pd(c6_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw6            = _mm256_mul_pd(c6_00,FF);
 +
 +            /* CUBIC SPLINE TABLE REPULSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw12           = _mm256_mul_pd(c12_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw12           = _mm256_mul_pd(c12_00,FF);
 +            vvdw             = _mm256_add_pd(vvdw12,vvdw6);
 +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            vvdw             = _mm256_andnot_pd(dummy_mask,vvdw);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = fvdw;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +            r10              = _mm256_andnot_pd(dummy_mask,r10);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r10,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(rinv10,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +            r20              = _mm256_andnot_pd(dummy_mask,r20);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r20,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(rinv20,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r30              = _mm256_mul_pd(rsq30,rinv30);
 +            r30              = _mm256_andnot_pd(dummy_mask,r30);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq30             = _mm256_mul_pd(iq3,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r30,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq30,_mm256_sub_pd(rinv30,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq30,rinv30),_mm256_sub_pd(rinvsq30,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx30);
 +            ty               = _mm256_mul_pd(fscal,dy30);
 +            tz               = _mm256_mul_pd(fscal,dz30);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 186 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_4atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
 +        gmx_mm256_update_1pot_pd(vvdwsum,kernel_data->energygrp_vdw+ggid);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 26 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4_VF,outeriter*26 + inneriter*186);
 +}
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecEw_VdwCSTab_GeomW4P1_F_avx_256_double
 + * Electrostatics interaction: Ewald
 + * VdW interaction:            CubicSplineTable
 + * Geometry:                   Water4-Particle
 + * Calculate force/pot:        Force
 + */
 +void
 +nb_kernel_ElecEw_VdwCSTab_GeomW4P1_F_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    real *           vdwioffsetptr3;
 +    __m256d          ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          dx30,dy30,dz30,rsq30,rinv30,rinvsq30,r30,qq30,c6_30,c12_30;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m128i          vfitab;
 +    __m128i          ifour       = _mm_set1_epi32(4);
 +    __m256d          rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
 +    real             *vftab;
 +    __m128i          ewitab;
 +    __m256d          ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
 +    __m256d          beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
 +    real             *ewtab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    vftab            = kernel_data->table_vdw->data;
 +    vftabscale       = _mm256_set1_pd(kernel_data->table_vdw->scale);
 +
 +    sh_ewald         = _mm256_set1_pd(fr->ic->sh_ewald);
 +    beta             = _mm256_set1_pd(fr->ic->ewaldcoeff);
 +    beta2            = _mm256_mul_pd(beta,beta);
 +    beta3            = _mm256_mul_pd(beta,beta2);
 +
 +    ewtab            = fr->ic->tabq_coul_F;
 +    ewtabscale       = _mm256_set1_pd(fr->ic->tabq_scale);
 +    ewtabhalfspace   = _mm256_set1_pd(0.5/fr->ic->tabq_scale);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    iq3              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+3]));
 +    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_4rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +        fix3             = _mm256_setzero_pd();
 +        fiy3             = _mm256_setzero_pd();
 +        fiz3             = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx30             = _mm256_sub_pd(ix3,jx0);
 +            dy30             = _mm256_sub_pd(iy3,jy0);
 +            dz30             = _mm256_sub_pd(iz3,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq30            = gmx_mm256_calc_rsq_pd(dx30,dy30,dz30);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv30           = gmx_mm256_invsqrt_pd(rsq30);
 +
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq30         = _mm256_mul_pd(rinv30,rinv30);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,3);
 +
 +            /* CUBIC SPLINE TABLE DISPERSION */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw6            = _mm256_mul_pd(c6_00,FF);
 +
 +            /* CUBIC SPLINE TABLE REPULSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw12           = _mm256_mul_pd(c12_00,FF);
 +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            fscal            = fvdw;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r10,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r20,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r30              = _mm256_mul_pd(rsq30,rinv30);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq30             = _mm256_mul_pd(iq3,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r30,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq30,rinv30),_mm256_sub_pd(rinvsq30,felec));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx30);
 +            ty               = _mm256_mul_pd(fscal,dy30);
 +            tz               = _mm256_mul_pd(fscal,dz30);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 159 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx30             = _mm256_sub_pd(ix3,jx0);
 +            dy30             = _mm256_sub_pd(iy3,jy0);
 +            dz30             = _mm256_sub_pd(iz3,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq30            = gmx_mm256_calc_rsq_pd(dx30,dy30,dz30);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv30           = gmx_mm256_invsqrt_pd(rsq30);
 +
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq30         = _mm256_mul_pd(rinv30,rinv30);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,3);
 +
 +            /* CUBIC SPLINE TABLE DISPERSION */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw6            = _mm256_mul_pd(c6_00,FF);
 +
 +            /* CUBIC SPLINE TABLE REPULSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw12           = _mm256_mul_pd(c12_00,FF);
 +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            fscal            = fvdw;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +            r10              = _mm256_andnot_pd(dummy_mask,r10);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r10,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +            r20              = _mm256_andnot_pd(dummy_mask,r20);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r20,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r30              = _mm256_mul_pd(rsq30,rinv30);
 +            r30              = _mm256_andnot_pd(dummy_mask,r30);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq30             = _mm256_mul_pd(iq3,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r30,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq30,rinv30),_mm256_sub_pd(rinvsq30,felec));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx30);
 +            ty               = _mm256_mul_pd(fscal,dy30);
 +            tz               = _mm256_mul_pd(fscal,dz30);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 163 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_4atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 24 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4_F,outeriter*24 + inneriter*163);
 +}
index 0e846bf839ba54d6528fd3761038c689845d7349,0000000000000000000000000000000000000000..52497e021d28de90eef28aa58db66e2692fe156b
mode 100644,000000..100644
--- /dev/null
@@@ -1,2566 -1,0 +1,2566 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/*
 + * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2001-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the Free
 + * Software Foundation; either version 2 of the License, or (at your option) any
 + * later version.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website.
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecEw_VdwCSTab_GeomW4W4_VF_avx_256_double
 + * Electrostatics interaction: Ewald
 + * VdW interaction:            CubicSplineTable
 + * Geometry:                   Water4-Water4
 + * Calculate force/pot:        PotentialAndForce
 + */
 +void
 +nb_kernel_ElecEw_VdwCSTab_GeomW4W4_VF_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    real *           vdwioffsetptr3;
 +    __m256d          ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
 +    __m256d          jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
 +    int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
 +    __m256d          jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
 +    int              vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D;
 +    __m256d          jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
 +    __m256d          dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
 +    __m256d          dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
 +    __m256d          dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
 +    __m256d          dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
 +    __m256d          dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
 +    __m256d          dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
 +    __m256d          dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
 +    __m256d          dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m128i          vfitab;
 +    __m128i          ifour       = _mm_set1_epi32(4);
 +    __m256d          rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
 +    real             *vftab;
 +    __m128i          ewitab;
 +    __m256d          ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
 +    __m256d          beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
 +    real             *ewtab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    vftab            = kernel_data->table_vdw->data;
 +    vftabscale       = _mm256_set1_pd(kernel_data->table_vdw->scale);
 +
 +    sh_ewald         = _mm256_set1_pd(fr->ic->sh_ewald);
 +    beta             = _mm256_set1_pd(fr->ic->ewaldcoeff);
 +    beta2            = _mm256_mul_pd(beta,beta);
 +    beta3            = _mm256_mul_pd(beta,beta2);
 +
 +    ewtab            = fr->ic->tabq_coul_FDV0;
 +    ewtabscale       = _mm256_set1_pd(fr->ic->tabq_scale);
 +    ewtabhalfspace   = _mm256_set1_pd(0.5/fr->ic->tabq_scale);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    iq3              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+3]));
 +    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +    jq1              = _mm256_set1_pd(charge[inr+1]);
 +    jq2              = _mm256_set1_pd(charge[inr+2]);
 +    jq3              = _mm256_set1_pd(charge[inr+3]);
 +    vdwjidx0A        = 2*vdwtype[inr+0];
 +    c6_00            = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A]);
 +    c12_00           = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A+1]);
 +    qq11             = _mm256_mul_pd(iq1,jq1);
 +    qq12             = _mm256_mul_pd(iq1,jq2);
 +    qq13             = _mm256_mul_pd(iq1,jq3);
 +    qq21             = _mm256_mul_pd(iq2,jq1);
 +    qq22             = _mm256_mul_pd(iq2,jq2);
 +    qq23             = _mm256_mul_pd(iq2,jq3);
 +    qq31             = _mm256_mul_pd(iq3,jq1);
 +    qq32             = _mm256_mul_pd(iq3,jq2);
 +    qq33             = _mm256_mul_pd(iq3,jq3);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_4rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +        fix3             = _mm256_setzero_pd();
 +        fiy3             = _mm256_setzero_pd();
 +        fiz3             = _mm256_setzero_pd();
 +
 +        /* Reset potential sums */
 +        velecsum         = _mm256_setzero_pd();
 +        vvdwsum          = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_4rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
 +                                                 &jy2,&jz2,&jx3,&jy3,&jz3);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx13             = _mm256_sub_pd(ix1,jx3);
 +            dy13             = _mm256_sub_pd(iy1,jy3);
 +            dz13             = _mm256_sub_pd(iz1,jz3);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +            dx23             = _mm256_sub_pd(ix2,jx3);
 +            dy23             = _mm256_sub_pd(iy2,jy3);
 +            dz23             = _mm256_sub_pd(iz2,jz3);
 +            dx31             = _mm256_sub_pd(ix3,jx1);
 +            dy31             = _mm256_sub_pd(iy3,jy1);
 +            dz31             = _mm256_sub_pd(iz3,jz1);
 +            dx32             = _mm256_sub_pd(ix3,jx2);
 +            dy32             = _mm256_sub_pd(iy3,jy2);
 +            dz32             = _mm256_sub_pd(iz3,jz2);
 +            dx33             = _mm256_sub_pd(ix3,jx3);
 +            dy33             = _mm256_sub_pd(iy3,jy3);
 +            dz33             = _mm256_sub_pd(iz3,jz3);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq13            = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +            rsq23            = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
 +            rsq31            = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
 +            rsq32            = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
 +            rsq33            = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv13           = gmx_mm256_invsqrt_pd(rsq13);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +            rinv23           = gmx_mm256_invsqrt_pd(rsq23);
 +            rinv31           = gmx_mm256_invsqrt_pd(rsq31);
 +            rinv32           = gmx_mm256_invsqrt_pd(rsq32);
 +            rinv33           = gmx_mm256_invsqrt_pd(rsq33);
 +
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq13         = _mm256_mul_pd(rinv13,rinv13);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +            rinvsq23         = _mm256_mul_pd(rinv23,rinv23);
 +            rinvsq31         = _mm256_mul_pd(rinv31,rinv31);
 +            rinvsq32         = _mm256_mul_pd(rinv32,rinv32);
 +            rinvsq33         = _mm256_mul_pd(rinv33,rinv33);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +            fjx3             = _mm256_setzero_pd();
 +            fjy3             = _mm256_setzero_pd();
 +            fjz3             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,3);
 +
 +            /* CUBIC SPLINE TABLE DISPERSION */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw6            = _mm256_mul_pd(c6_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw6            = _mm256_mul_pd(c6_00,FF);
 +
 +            /* CUBIC SPLINE TABLE REPULSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw12           = _mm256_mul_pd(c12_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw12           = _mm256_mul_pd(c12_00,FF);
 +            vvdw             = _mm256_add_pd(vvdw12,vvdw6);
 +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = fvdw;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r11              = _mm256_mul_pd(rsq11,rinv11);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r11,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq11,_mm256_sub_pd(rinv11,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq11,rinv11),_mm256_sub_pd(rinvsq11,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r12              = _mm256_mul_pd(rsq12,rinv12);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r12,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq12,_mm256_sub_pd(rinv12,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq12,rinv12),_mm256_sub_pd(rinvsq12,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r13              = _mm256_mul_pd(rsq13,rinv13);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r13,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq13,_mm256_sub_pd(rinv13,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq13,rinv13),_mm256_sub_pd(rinvsq13,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx13);
 +            ty               = _mm256_mul_pd(fscal,dy13);
 +            tz               = _mm256_mul_pd(fscal,dz13);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r21              = _mm256_mul_pd(rsq21,rinv21);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r21,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq21,_mm256_sub_pd(rinv21,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq21,rinv21),_mm256_sub_pd(rinvsq21,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r22              = _mm256_mul_pd(rsq22,rinv22);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r22,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq22,_mm256_sub_pd(rinv22,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq22,rinv22),_mm256_sub_pd(rinvsq22,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r23              = _mm256_mul_pd(rsq23,rinv23);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r23,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq23,_mm256_sub_pd(rinv23,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq23,rinv23),_mm256_sub_pd(rinvsq23,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx23);
 +            ty               = _mm256_mul_pd(fscal,dy23);
 +            tz               = _mm256_mul_pd(fscal,dz23);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r31              = _mm256_mul_pd(rsq31,rinv31);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r31,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq31,_mm256_sub_pd(rinv31,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq31,rinv31),_mm256_sub_pd(rinvsq31,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx31);
 +            ty               = _mm256_mul_pd(fscal,dy31);
 +            tz               = _mm256_mul_pd(fscal,dz31);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r32              = _mm256_mul_pd(rsq32,rinv32);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r32,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq32,_mm256_sub_pd(rinv32,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq32,rinv32),_mm256_sub_pd(rinvsq32,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx32);
 +            ty               = _mm256_mul_pd(fscal,dy32);
 +            tz               = _mm256_mul_pd(fscal,dz32);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r33              = _mm256_mul_pd(rsq33,rinv33);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r33,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq33,_mm256_sub_pd(rinv33,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq33,rinv33),_mm256_sub_pd(rinvsq33,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx33);
 +            ty               = _mm256_mul_pd(fscal,dy33);
 +            tz               = _mm256_mul_pd(fscal,dz33);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_4rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
 +                                                      fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
 +
 +            /* Inner loop uses 428 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_4rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
 +                                                 &jy2,&jz2,&jx3,&jy3,&jz3);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx13             = _mm256_sub_pd(ix1,jx3);
 +            dy13             = _mm256_sub_pd(iy1,jy3);
 +            dz13             = _mm256_sub_pd(iz1,jz3);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +            dx23             = _mm256_sub_pd(ix2,jx3);
 +            dy23             = _mm256_sub_pd(iy2,jy3);
 +            dz23             = _mm256_sub_pd(iz2,jz3);
 +            dx31             = _mm256_sub_pd(ix3,jx1);
 +            dy31             = _mm256_sub_pd(iy3,jy1);
 +            dz31             = _mm256_sub_pd(iz3,jz1);
 +            dx32             = _mm256_sub_pd(ix3,jx2);
 +            dy32             = _mm256_sub_pd(iy3,jy2);
 +            dz32             = _mm256_sub_pd(iz3,jz2);
 +            dx33             = _mm256_sub_pd(ix3,jx3);
 +            dy33             = _mm256_sub_pd(iy3,jy3);
 +            dz33             = _mm256_sub_pd(iz3,jz3);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq13            = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +            rsq23            = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
 +            rsq31            = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
 +            rsq32            = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
 +            rsq33            = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv13           = gmx_mm256_invsqrt_pd(rsq13);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +            rinv23           = gmx_mm256_invsqrt_pd(rsq23);
 +            rinv31           = gmx_mm256_invsqrt_pd(rsq31);
 +            rinv32           = gmx_mm256_invsqrt_pd(rsq32);
 +            rinv33           = gmx_mm256_invsqrt_pd(rsq33);
 +
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq13         = _mm256_mul_pd(rinv13,rinv13);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +            rinvsq23         = _mm256_mul_pd(rinv23,rinv23);
 +            rinvsq31         = _mm256_mul_pd(rinv31,rinv31);
 +            rinvsq32         = _mm256_mul_pd(rinv32,rinv32);
 +            rinvsq33         = _mm256_mul_pd(rinv33,rinv33);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +            fjx3             = _mm256_setzero_pd();
 +            fjy3             = _mm256_setzero_pd();
 +            fjz3             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,3);
 +
 +            /* CUBIC SPLINE TABLE DISPERSION */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw6            = _mm256_mul_pd(c6_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw6            = _mm256_mul_pd(c6_00,FF);
 +
 +            /* CUBIC SPLINE TABLE REPULSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw12           = _mm256_mul_pd(c12_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw12           = _mm256_mul_pd(c12_00,FF);
 +            vvdw             = _mm256_add_pd(vvdw12,vvdw6);
 +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            vvdw             = _mm256_andnot_pd(dummy_mask,vvdw);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = fvdw;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r11              = _mm256_mul_pd(rsq11,rinv11);
 +            r11              = _mm256_andnot_pd(dummy_mask,r11);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r11,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq11,_mm256_sub_pd(rinv11,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq11,rinv11),_mm256_sub_pd(rinvsq11,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r12              = _mm256_mul_pd(rsq12,rinv12);
 +            r12              = _mm256_andnot_pd(dummy_mask,r12);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r12,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq12,_mm256_sub_pd(rinv12,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq12,rinv12),_mm256_sub_pd(rinvsq12,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r13              = _mm256_mul_pd(rsq13,rinv13);
 +            r13              = _mm256_andnot_pd(dummy_mask,r13);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r13,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq13,_mm256_sub_pd(rinv13,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq13,rinv13),_mm256_sub_pd(rinvsq13,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx13);
 +            ty               = _mm256_mul_pd(fscal,dy13);
 +            tz               = _mm256_mul_pd(fscal,dz13);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r21              = _mm256_mul_pd(rsq21,rinv21);
 +            r21              = _mm256_andnot_pd(dummy_mask,r21);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r21,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq21,_mm256_sub_pd(rinv21,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq21,rinv21),_mm256_sub_pd(rinvsq21,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r22              = _mm256_mul_pd(rsq22,rinv22);
 +            r22              = _mm256_andnot_pd(dummy_mask,r22);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r22,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq22,_mm256_sub_pd(rinv22,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq22,rinv22),_mm256_sub_pd(rinvsq22,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r23              = _mm256_mul_pd(rsq23,rinv23);
 +            r23              = _mm256_andnot_pd(dummy_mask,r23);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r23,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq23,_mm256_sub_pd(rinv23,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq23,rinv23),_mm256_sub_pd(rinvsq23,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx23);
 +            ty               = _mm256_mul_pd(fscal,dy23);
 +            tz               = _mm256_mul_pd(fscal,dz23);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r31              = _mm256_mul_pd(rsq31,rinv31);
 +            r31              = _mm256_andnot_pd(dummy_mask,r31);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r31,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq31,_mm256_sub_pd(rinv31,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq31,rinv31),_mm256_sub_pd(rinvsq31,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx31);
 +            ty               = _mm256_mul_pd(fscal,dy31);
 +            tz               = _mm256_mul_pd(fscal,dz31);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r32              = _mm256_mul_pd(rsq32,rinv32);
 +            r32              = _mm256_andnot_pd(dummy_mask,r32);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r32,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq32,_mm256_sub_pd(rinv32,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq32,rinv32),_mm256_sub_pd(rinvsq32,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx32);
 +            ty               = _mm256_mul_pd(fscal,dy32);
 +            tz               = _mm256_mul_pd(fscal,dz32);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r33              = _mm256_mul_pd(rsq33,rinv33);
 +            r33              = _mm256_andnot_pd(dummy_mask,r33);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r33,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq33,_mm256_sub_pd(rinv33,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq33,rinv33),_mm256_sub_pd(rinvsq33,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx33);
 +            ty               = _mm256_mul_pd(fscal,dy33);
 +            tz               = _mm256_mul_pd(fscal,dz33);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_4rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
 +                                                      fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
 +
 +            /* Inner loop uses 438 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_4atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
 +        gmx_mm256_update_1pot_pd(vvdwsum,kernel_data->energygrp_vdw+ggid);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 26 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4W4_VF,outeriter*26 + inneriter*438);
 +}
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecEw_VdwCSTab_GeomW4W4_F_avx_256_double
 + * Electrostatics interaction: Ewald
 + * VdW interaction:            CubicSplineTable
 + * Geometry:                   Water4-Water4
 + * Calculate force/pot:        Force
 + */
 +void
 +nb_kernel_ElecEw_VdwCSTab_GeomW4W4_F_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    real *           vdwioffsetptr3;
 +    __m256d          ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
 +    __m256d          jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
 +    int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
 +    __m256d          jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
 +    int              vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D;
 +    __m256d          jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
 +    __m256d          dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
 +    __m256d          dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
 +    __m256d          dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
 +    __m256d          dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
 +    __m256d          dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
 +    __m256d          dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
 +    __m256d          dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
 +    __m256d          dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m128i          vfitab;
 +    __m128i          ifour       = _mm_set1_epi32(4);
 +    __m256d          rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
 +    real             *vftab;
 +    __m128i          ewitab;
 +    __m256d          ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
 +    __m256d          beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
 +    real             *ewtab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    vftab            = kernel_data->table_vdw->data;
 +    vftabscale       = _mm256_set1_pd(kernel_data->table_vdw->scale);
 +
 +    sh_ewald         = _mm256_set1_pd(fr->ic->sh_ewald);
 +    beta             = _mm256_set1_pd(fr->ic->ewaldcoeff);
 +    beta2            = _mm256_mul_pd(beta,beta);
 +    beta3            = _mm256_mul_pd(beta,beta2);
 +
 +    ewtab            = fr->ic->tabq_coul_F;
 +    ewtabscale       = _mm256_set1_pd(fr->ic->tabq_scale);
 +    ewtabhalfspace   = _mm256_set1_pd(0.5/fr->ic->tabq_scale);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    iq3              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+3]));
 +    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +    jq1              = _mm256_set1_pd(charge[inr+1]);
 +    jq2              = _mm256_set1_pd(charge[inr+2]);
 +    jq3              = _mm256_set1_pd(charge[inr+3]);
 +    vdwjidx0A        = 2*vdwtype[inr+0];
 +    c6_00            = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A]);
 +    c12_00           = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A+1]);
 +    qq11             = _mm256_mul_pd(iq1,jq1);
 +    qq12             = _mm256_mul_pd(iq1,jq2);
 +    qq13             = _mm256_mul_pd(iq1,jq3);
 +    qq21             = _mm256_mul_pd(iq2,jq1);
 +    qq22             = _mm256_mul_pd(iq2,jq2);
 +    qq23             = _mm256_mul_pd(iq2,jq3);
 +    qq31             = _mm256_mul_pd(iq3,jq1);
 +    qq32             = _mm256_mul_pd(iq3,jq2);
 +    qq33             = _mm256_mul_pd(iq3,jq3);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_4rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +        fix3             = _mm256_setzero_pd();
 +        fiy3             = _mm256_setzero_pd();
 +        fiz3             = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_4rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
 +                                                 &jy2,&jz2,&jx3,&jy3,&jz3);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx13             = _mm256_sub_pd(ix1,jx3);
 +            dy13             = _mm256_sub_pd(iy1,jy3);
 +            dz13             = _mm256_sub_pd(iz1,jz3);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +            dx23             = _mm256_sub_pd(ix2,jx3);
 +            dy23             = _mm256_sub_pd(iy2,jy3);
 +            dz23             = _mm256_sub_pd(iz2,jz3);
 +            dx31             = _mm256_sub_pd(ix3,jx1);
 +            dy31             = _mm256_sub_pd(iy3,jy1);
 +            dz31             = _mm256_sub_pd(iz3,jz1);
 +            dx32             = _mm256_sub_pd(ix3,jx2);
 +            dy32             = _mm256_sub_pd(iy3,jy2);
 +            dz32             = _mm256_sub_pd(iz3,jz2);
 +            dx33             = _mm256_sub_pd(ix3,jx3);
 +            dy33             = _mm256_sub_pd(iy3,jy3);
 +            dz33             = _mm256_sub_pd(iz3,jz3);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq13            = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +            rsq23            = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
 +            rsq31            = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
 +            rsq32            = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
 +            rsq33            = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv13           = gmx_mm256_invsqrt_pd(rsq13);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +            rinv23           = gmx_mm256_invsqrt_pd(rsq23);
 +            rinv31           = gmx_mm256_invsqrt_pd(rsq31);
 +            rinv32           = gmx_mm256_invsqrt_pd(rsq32);
 +            rinv33           = gmx_mm256_invsqrt_pd(rsq33);
 +
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq13         = _mm256_mul_pd(rinv13,rinv13);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +            rinvsq23         = _mm256_mul_pd(rinv23,rinv23);
 +            rinvsq31         = _mm256_mul_pd(rinv31,rinv31);
 +            rinvsq32         = _mm256_mul_pd(rinv32,rinv32);
 +            rinvsq33         = _mm256_mul_pd(rinv33,rinv33);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +            fjx3             = _mm256_setzero_pd();
 +            fjy3             = _mm256_setzero_pd();
 +            fjz3             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,3);
 +
 +            /* CUBIC SPLINE TABLE DISPERSION */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw6            = _mm256_mul_pd(c6_00,FF);
 +
 +            /* CUBIC SPLINE TABLE REPULSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw12           = _mm256_mul_pd(c12_00,FF);
 +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            fscal            = fvdw;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r11              = _mm256_mul_pd(rsq11,rinv11);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r11,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq11,rinv11),_mm256_sub_pd(rinvsq11,felec));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r12              = _mm256_mul_pd(rsq12,rinv12);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r12,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq12,rinv12),_mm256_sub_pd(rinvsq12,felec));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r13              = _mm256_mul_pd(rsq13,rinv13);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r13,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq13,rinv13),_mm256_sub_pd(rinvsq13,felec));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx13);
 +            ty               = _mm256_mul_pd(fscal,dy13);
 +            tz               = _mm256_mul_pd(fscal,dz13);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r21              = _mm256_mul_pd(rsq21,rinv21);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r21,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq21,rinv21),_mm256_sub_pd(rinvsq21,felec));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r22              = _mm256_mul_pd(rsq22,rinv22);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r22,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq22,rinv22),_mm256_sub_pd(rinvsq22,felec));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r23              = _mm256_mul_pd(rsq23,rinv23);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r23,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq23,rinv23),_mm256_sub_pd(rinvsq23,felec));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx23);
 +            ty               = _mm256_mul_pd(fscal,dy23);
 +            tz               = _mm256_mul_pd(fscal,dz23);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r31              = _mm256_mul_pd(rsq31,rinv31);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r31,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq31,rinv31),_mm256_sub_pd(rinvsq31,felec));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx31);
 +            ty               = _mm256_mul_pd(fscal,dy31);
 +            tz               = _mm256_mul_pd(fscal,dz31);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r32              = _mm256_mul_pd(rsq32,rinv32);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r32,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq32,rinv32),_mm256_sub_pd(rinvsq32,felec));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx32);
 +            ty               = _mm256_mul_pd(fscal,dy32);
 +            tz               = _mm256_mul_pd(fscal,dz32);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r33              = _mm256_mul_pd(rsq33,rinv33);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r33,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq33,rinv33),_mm256_sub_pd(rinvsq33,felec));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx33);
 +            ty               = _mm256_mul_pd(fscal,dy33);
 +            tz               = _mm256_mul_pd(fscal,dz33);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_4rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
 +                                                      fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
 +
 +            /* Inner loop uses 375 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_4rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
 +                                                 &jy2,&jz2,&jx3,&jy3,&jz3);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx13             = _mm256_sub_pd(ix1,jx3);
 +            dy13             = _mm256_sub_pd(iy1,jy3);
 +            dz13             = _mm256_sub_pd(iz1,jz3);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +            dx23             = _mm256_sub_pd(ix2,jx3);
 +            dy23             = _mm256_sub_pd(iy2,jy3);
 +            dz23             = _mm256_sub_pd(iz2,jz3);
 +            dx31             = _mm256_sub_pd(ix3,jx1);
 +            dy31             = _mm256_sub_pd(iy3,jy1);
 +            dz31             = _mm256_sub_pd(iz3,jz1);
 +            dx32             = _mm256_sub_pd(ix3,jx2);
 +            dy32             = _mm256_sub_pd(iy3,jy2);
 +            dz32             = _mm256_sub_pd(iz3,jz2);
 +            dx33             = _mm256_sub_pd(ix3,jx3);
 +            dy33             = _mm256_sub_pd(iy3,jy3);
 +            dz33             = _mm256_sub_pd(iz3,jz3);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq13            = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +            rsq23            = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
 +            rsq31            = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
 +            rsq32            = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
 +            rsq33            = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv13           = gmx_mm256_invsqrt_pd(rsq13);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +            rinv23           = gmx_mm256_invsqrt_pd(rsq23);
 +            rinv31           = gmx_mm256_invsqrt_pd(rsq31);
 +            rinv32           = gmx_mm256_invsqrt_pd(rsq32);
 +            rinv33           = gmx_mm256_invsqrt_pd(rsq33);
 +
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq13         = _mm256_mul_pd(rinv13,rinv13);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +            rinvsq23         = _mm256_mul_pd(rinv23,rinv23);
 +            rinvsq31         = _mm256_mul_pd(rinv31,rinv31);
 +            rinvsq32         = _mm256_mul_pd(rinv32,rinv32);
 +            rinvsq33         = _mm256_mul_pd(rinv33,rinv33);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +            fjx3             = _mm256_setzero_pd();
 +            fjy3             = _mm256_setzero_pd();
 +            fjz3             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,3);
 +
 +            /* CUBIC SPLINE TABLE DISPERSION */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw6            = _mm256_mul_pd(c6_00,FF);
 +
 +            /* CUBIC SPLINE TABLE REPULSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw12           = _mm256_mul_pd(c12_00,FF);
 +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            fscal            = fvdw;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r11              = _mm256_mul_pd(rsq11,rinv11);
 +            r11              = _mm256_andnot_pd(dummy_mask,r11);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r11,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq11,rinv11),_mm256_sub_pd(rinvsq11,felec));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r12              = _mm256_mul_pd(rsq12,rinv12);
 +            r12              = _mm256_andnot_pd(dummy_mask,r12);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r12,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq12,rinv12),_mm256_sub_pd(rinvsq12,felec));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r13              = _mm256_mul_pd(rsq13,rinv13);
 +            r13              = _mm256_andnot_pd(dummy_mask,r13);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r13,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq13,rinv13),_mm256_sub_pd(rinvsq13,felec));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx13);
 +            ty               = _mm256_mul_pd(fscal,dy13);
 +            tz               = _mm256_mul_pd(fscal,dz13);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r21              = _mm256_mul_pd(rsq21,rinv21);
 +            r21              = _mm256_andnot_pd(dummy_mask,r21);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r21,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq21,rinv21),_mm256_sub_pd(rinvsq21,felec));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r22              = _mm256_mul_pd(rsq22,rinv22);
 +            r22              = _mm256_andnot_pd(dummy_mask,r22);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r22,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq22,rinv22),_mm256_sub_pd(rinvsq22,felec));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r23              = _mm256_mul_pd(rsq23,rinv23);
 +            r23              = _mm256_andnot_pd(dummy_mask,r23);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r23,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq23,rinv23),_mm256_sub_pd(rinvsq23,felec));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx23);
 +            ty               = _mm256_mul_pd(fscal,dy23);
 +            tz               = _mm256_mul_pd(fscal,dz23);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r31              = _mm256_mul_pd(rsq31,rinv31);
 +            r31              = _mm256_andnot_pd(dummy_mask,r31);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r31,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq31,rinv31),_mm256_sub_pd(rinvsq31,felec));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx31);
 +            ty               = _mm256_mul_pd(fscal,dy31);
 +            tz               = _mm256_mul_pd(fscal,dz31);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r32              = _mm256_mul_pd(rsq32,rinv32);
 +            r32              = _mm256_andnot_pd(dummy_mask,r32);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r32,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq32,rinv32),_mm256_sub_pd(rinvsq32,felec));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx32);
 +            ty               = _mm256_mul_pd(fscal,dy32);
 +            tz               = _mm256_mul_pd(fscal,dz32);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r33              = _mm256_mul_pd(rsq33,rinv33);
 +            r33              = _mm256_andnot_pd(dummy_mask,r33);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r33,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq33,rinv33),_mm256_sub_pd(rinvsq33,felec));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx33);
 +            ty               = _mm256_mul_pd(fscal,dy33);
 +            tz               = _mm256_mul_pd(fscal,dz33);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_4rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
 +                                                      fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
 +
 +            /* Inner loop uses 385 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_4atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 24 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4W4_F,outeriter*24 + inneriter*385);
 +}
index d18c19ca1a75d73e21fe5b5cb79c4e27bfb2700d,0000000000000000000000000000000000000000..fe651a461a346fccd672d0d0666fa7ccc9d49211
mode 100644,000000..100644
--- /dev/null
@@@ -1,746 -1,0 +1,746 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/*
 + * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2001-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the Free
 + * Software Foundation; either version 2 of the License, or (at your option) any
 + * later version.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website.
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecEw_VdwLJ_GeomP1P1_VF_avx_256_double
 + * Electrostatics interaction: Ewald
 + * VdW interaction:            LennardJones
 + * Geometry:                   Particle-Particle
 + * Calculate force/pot:        PotentialAndForce
 + */
 +void
 +nb_kernel_ElecEw_VdwLJ_GeomP1P1_VF_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m128i          ewitab;
 +    __m256d          ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
 +    __m256d          beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
 +    real             *ewtab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    sh_ewald         = _mm256_set1_pd(fr->ic->sh_ewald);
 +    beta             = _mm256_set1_pd(fr->ic->ewaldcoeff);
 +    beta2            = _mm256_mul_pd(beta,beta);
 +    beta3            = _mm256_mul_pd(beta,beta2);
 +
 +    ewtab            = fr->ic->tabq_coul_FDV0;
 +    ewtabscale       = _mm256_set1_pd(fr->ic->tabq_scale);
 +    ewtabhalfspace   = _mm256_set1_pd(0.5/fr->ic->tabq_scale);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_1rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +
 +        /* Load parameters for i particles */
 +        iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +        vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +        /* Reset potential sums */
 +        velecsum         = _mm256_setzero_pd();
 +        vvdwsum          = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r00,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(rinv00,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
 +
 +            /* Inner loop uses 53 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r00,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(rinv00,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +            vvdw             = _mm256_andnot_pd(dummy_mask,vvdw);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
 +
 +            /* Inner loop uses 54 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_1atom_swizzle_pd(fix0,fiy0,fiz0,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
 +        gmx_mm256_update_1pot_pd(vvdwsum,kernel_data->energygrp_vdw+ggid);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 9 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_VF,outeriter*9 + inneriter*54);
 +}
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecEw_VdwLJ_GeomP1P1_F_avx_256_double
 + * Electrostatics interaction: Ewald
 + * VdW interaction:            LennardJones
 + * Geometry:                   Particle-Particle
 + * Calculate force/pot:        Force
 + */
 +void
 +nb_kernel_ElecEw_VdwLJ_GeomP1P1_F_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m128i          ewitab;
 +    __m256d          ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
 +    __m256d          beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
 +    real             *ewtab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    sh_ewald         = _mm256_set1_pd(fr->ic->sh_ewald);
 +    beta             = _mm256_set1_pd(fr->ic->ewaldcoeff);
 +    beta2            = _mm256_mul_pd(beta,beta);
 +    beta3            = _mm256_mul_pd(beta,beta2);
 +
 +    ewtab            = fr->ic->tabq_coul_F;
 +    ewtabscale       = _mm256_set1_pd(fr->ic->tabq_scale);
 +    ewtabhalfspace   = _mm256_set1_pd(0.5/fr->ic->tabq_scale);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_1rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +
 +        /* Load parameters for i particles */
 +        iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +        vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r00,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(_mm256_mul_pd(c12_00,rinvsix),c6_00),_mm256_mul_pd(rinvsix,rinvsq00));
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
 +
 +            /* Inner loop uses 43 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r00,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(_mm256_mul_pd(c12_00,rinvsix),c6_00),_mm256_mul_pd(rinvsix,rinvsq00));
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
 +
 +            /* Inner loop uses 44 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_1atom_swizzle_pd(fix0,fiy0,fiz0,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 7 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_F,outeriter*7 + inneriter*44);
 +}
index 5dac58b7137c9320288aed35c455564f11af585c,0000000000000000000000000000000000000000..0d88493ded5973e053c072b0da6f47d7c4032b7c
mode 100644,000000..100644
--- /dev/null
@@@ -1,1204 -1,0 +1,1204 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/*
 + * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2001-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the Free
 + * Software Foundation; either version 2 of the License, or (at your option) any
 + * later version.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website.
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecEw_VdwLJ_GeomW3P1_VF_avx_256_double
 + * Electrostatics interaction: Ewald
 + * VdW interaction:            LennardJones
 + * Geometry:                   Water3-Particle
 + * Calculate force/pot:        PotentialAndForce
 + */
 +void
 +nb_kernel_ElecEw_VdwLJ_GeomW3P1_VF_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m128i          ewitab;
 +    __m256d          ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
 +    __m256d          beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
 +    real             *ewtab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    sh_ewald         = _mm256_set1_pd(fr->ic->sh_ewald);
 +    beta             = _mm256_set1_pd(fr->ic->ewaldcoeff);
 +    beta2            = _mm256_mul_pd(beta,beta);
 +    beta3            = _mm256_mul_pd(beta,beta2);
 +
 +    ewtab            = fr->ic->tabq_coul_FDV0;
 +    ewtabscale       = _mm256_set1_pd(fr->ic->tabq_scale);
 +    ewtabhalfspace   = _mm256_set1_pd(0.5/fr->ic->tabq_scale);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +
 +        /* Reset potential sums */
 +        velecsum         = _mm256_setzero_pd();
 +        vvdwsum          = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r00,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(rinv00,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r10,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(rinv10,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r20,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(rinv20,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 138 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r00,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(rinv00,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +            vvdw             = _mm256_andnot_pd(dummy_mask,vvdw);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +            r10              = _mm256_andnot_pd(dummy_mask,r10);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r10,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(rinv10,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +            r20              = _mm256_andnot_pd(dummy_mask,r20);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r20,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(rinv20,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 141 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
 +        gmx_mm256_update_1pot_pd(vvdwsum,kernel_data->energygrp_vdw+ggid);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 20 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3_VF,outeriter*20 + inneriter*141);
 +}
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecEw_VdwLJ_GeomW3P1_F_avx_256_double
 + * Electrostatics interaction: Ewald
 + * VdW interaction:            LennardJones
 + * Geometry:                   Water3-Particle
 + * Calculate force/pot:        Force
 + */
 +void
 +nb_kernel_ElecEw_VdwLJ_GeomW3P1_F_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m128i          ewitab;
 +    __m256d          ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
 +    __m256d          beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
 +    real             *ewtab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    sh_ewald         = _mm256_set1_pd(fr->ic->sh_ewald);
 +    beta             = _mm256_set1_pd(fr->ic->ewaldcoeff);
 +    beta2            = _mm256_mul_pd(beta,beta);
 +    beta3            = _mm256_mul_pd(beta,beta2);
 +
 +    ewtab            = fr->ic->tabq_coul_F;
 +    ewtabscale       = _mm256_set1_pd(fr->ic->tabq_scale);
 +    ewtabhalfspace   = _mm256_set1_pd(0.5/fr->ic->tabq_scale);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r00,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(_mm256_mul_pd(c12_00,rinvsix),c6_00),_mm256_mul_pd(rinvsix,rinvsq00));
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r10,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r20,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 118 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r00,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(_mm256_mul_pd(c12_00,rinvsix),c6_00),_mm256_mul_pd(rinvsix,rinvsq00));
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +            r10              = _mm256_andnot_pd(dummy_mask,r10);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r10,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +            r20              = _mm256_andnot_pd(dummy_mask,r20);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r20,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 121 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 18 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3_F,outeriter*18 + inneriter*121);
 +}
index 34ff8a2fe52a336e5b8fb60bc0a955c5b6bfc331,0000000000000000000000000000000000000000..08e08e91429e5722c88ef2eea3f141331be43456
mode 100644,000000..100644
--- /dev/null
@@@ -1,2294 -1,0 +1,2294 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/*
 + * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2001-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the Free
 + * Software Foundation; either version 2 of the License, or (at your option) any
 + * later version.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website.
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecEw_VdwLJ_GeomW3W3_VF_avx_256_double
 + * Electrostatics interaction: Ewald
 + * VdW interaction:            LennardJones
 + * Geometry:                   Water3-Water3
 + * Calculate force/pot:        PotentialAndForce
 + */
 +void
 +nb_kernel_ElecEw_VdwLJ_GeomW3W3_VF_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
 +    __m256d          jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
 +    int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
 +    __m256d          jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
 +    __m256d          dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
 +    __m256d          dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
 +    __m256d          dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m128i          ewitab;
 +    __m256d          ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
 +    __m256d          beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
 +    real             *ewtab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    sh_ewald         = _mm256_set1_pd(fr->ic->sh_ewald);
 +    beta             = _mm256_set1_pd(fr->ic->ewaldcoeff);
 +    beta2            = _mm256_mul_pd(beta,beta);
 +    beta3            = _mm256_mul_pd(beta,beta2);
 +
 +    ewtab            = fr->ic->tabq_coul_FDV0;
 +    ewtabscale       = _mm256_set1_pd(fr->ic->tabq_scale);
 +    ewtabhalfspace   = _mm256_set1_pd(0.5/fr->ic->tabq_scale);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +    jq0              = _mm256_set1_pd(charge[inr+0]);
 +    jq1              = _mm256_set1_pd(charge[inr+1]);
 +    jq2              = _mm256_set1_pd(charge[inr+2]);
 +    vdwjidx0A        = 2*vdwtype[inr+0];
 +    qq00             = _mm256_mul_pd(iq0,jq0);
 +    c6_00            = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A]);
 +    c12_00           = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A+1]);
 +    qq01             = _mm256_mul_pd(iq0,jq1);
 +    qq02             = _mm256_mul_pd(iq0,jq2);
 +    qq10             = _mm256_mul_pd(iq1,jq0);
 +    qq11             = _mm256_mul_pd(iq1,jq1);
 +    qq12             = _mm256_mul_pd(iq1,jq2);
 +    qq20             = _mm256_mul_pd(iq2,jq0);
 +    qq21             = _mm256_mul_pd(iq2,jq1);
 +    qq22             = _mm256_mul_pd(iq2,jq2);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +
 +        /* Reset potential sums */
 +        velecsum         = _mm256_setzero_pd();
 +        vvdwsum          = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                              &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx01             = _mm256_sub_pd(ix0,jx1);
 +            dy01             = _mm256_sub_pd(iy0,jy1);
 +            dz01             = _mm256_sub_pd(iz0,jz1);
 +            dx02             = _mm256_sub_pd(ix0,jx2);
 +            dy02             = _mm256_sub_pd(iy0,jy2);
 +            dz02             = _mm256_sub_pd(iz0,jz2);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
 +            rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv01           = gmx_mm256_invsqrt_pd(rsq01);
 +            rinv02           = gmx_mm256_invsqrt_pd(rsq02);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq01         = _mm256_mul_pd(rinv01,rinv01);
 +            rinvsq02         = _mm256_mul_pd(rinv02,rinv02);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r00,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(rinv00,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r01              = _mm256_mul_pd(rsq01,rinv01);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r01,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq01,_mm256_sub_pd(rinv01,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq01,rinv01),_mm256_sub_pd(rinvsq01,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx01);
 +            ty               = _mm256_mul_pd(fscal,dy01);
 +            tz               = _mm256_mul_pd(fscal,dz01);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r02              = _mm256_mul_pd(rsq02,rinv02);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r02,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq02,_mm256_sub_pd(rinv02,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq02,rinv02),_mm256_sub_pd(rinvsq02,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx02);
 +            ty               = _mm256_mul_pd(fscal,dy02);
 +            tz               = _mm256_mul_pd(fscal,dz02);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r10,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(rinv10,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r11              = _mm256_mul_pd(rsq11,rinv11);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r11,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq11,_mm256_sub_pd(rinv11,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq11,rinv11),_mm256_sub_pd(rinvsq11,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r12              = _mm256_mul_pd(rsq12,rinv12);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r12,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq12,_mm256_sub_pd(rinv12,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq12,rinv12),_mm256_sub_pd(rinvsq12,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r20,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(rinv20,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r21              = _mm256_mul_pd(rsq21,rinv21);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r21,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq21,_mm256_sub_pd(rinv21,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq21,rinv21),_mm256_sub_pd(rinvsq21,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r22              = _mm256_mul_pd(rsq22,rinv22);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r22,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq22,_mm256_sub_pd(rinv22,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq22,rinv22),_mm256_sub_pd(rinvsq22,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
 +
 +            /* Inner loop uses 381 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                              &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx01             = _mm256_sub_pd(ix0,jx1);
 +            dy01             = _mm256_sub_pd(iy0,jy1);
 +            dz01             = _mm256_sub_pd(iz0,jz1);
 +            dx02             = _mm256_sub_pd(ix0,jx2);
 +            dy02             = _mm256_sub_pd(iy0,jy2);
 +            dz02             = _mm256_sub_pd(iz0,jz2);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
 +            rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv01           = gmx_mm256_invsqrt_pd(rsq01);
 +            rinv02           = gmx_mm256_invsqrt_pd(rsq02);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq01         = _mm256_mul_pd(rinv01,rinv01);
 +            rinvsq02         = _mm256_mul_pd(rinv02,rinv02);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r00,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(rinv00,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +            vvdw             = _mm256_andnot_pd(dummy_mask,vvdw);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r01              = _mm256_mul_pd(rsq01,rinv01);
 +            r01              = _mm256_andnot_pd(dummy_mask,r01);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r01,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq01,_mm256_sub_pd(rinv01,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq01,rinv01),_mm256_sub_pd(rinvsq01,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx01);
 +            ty               = _mm256_mul_pd(fscal,dy01);
 +            tz               = _mm256_mul_pd(fscal,dz01);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r02              = _mm256_mul_pd(rsq02,rinv02);
 +            r02              = _mm256_andnot_pd(dummy_mask,r02);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r02,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq02,_mm256_sub_pd(rinv02,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq02,rinv02),_mm256_sub_pd(rinvsq02,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx02);
 +            ty               = _mm256_mul_pd(fscal,dy02);
 +            tz               = _mm256_mul_pd(fscal,dz02);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +            r10              = _mm256_andnot_pd(dummy_mask,r10);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r10,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(rinv10,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r11              = _mm256_mul_pd(rsq11,rinv11);
 +            r11              = _mm256_andnot_pd(dummy_mask,r11);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r11,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq11,_mm256_sub_pd(rinv11,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq11,rinv11),_mm256_sub_pd(rinvsq11,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r12              = _mm256_mul_pd(rsq12,rinv12);
 +            r12              = _mm256_andnot_pd(dummy_mask,r12);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r12,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq12,_mm256_sub_pd(rinv12,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq12,rinv12),_mm256_sub_pd(rinvsq12,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +            r20              = _mm256_andnot_pd(dummy_mask,r20);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r20,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(rinv20,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r21              = _mm256_mul_pd(rsq21,rinv21);
 +            r21              = _mm256_andnot_pd(dummy_mask,r21);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r21,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq21,_mm256_sub_pd(rinv21,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq21,rinv21),_mm256_sub_pd(rinvsq21,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r22              = _mm256_mul_pd(rsq22,rinv22);
 +            r22              = _mm256_andnot_pd(dummy_mask,r22);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r22,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq22,_mm256_sub_pd(rinv22,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq22,rinv22),_mm256_sub_pd(rinvsq22,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
 +
 +            /* Inner loop uses 390 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
 +        gmx_mm256_update_1pot_pd(vvdwsum,kernel_data->energygrp_vdw+ggid);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 20 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3W3_VF,outeriter*20 + inneriter*390);
 +}
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecEw_VdwLJ_GeomW3W3_F_avx_256_double
 + * Electrostatics interaction: Ewald
 + * VdW interaction:            LennardJones
 + * Geometry:                   Water3-Water3
 + * Calculate force/pot:        Force
 + */
 +void
 +nb_kernel_ElecEw_VdwLJ_GeomW3W3_F_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
 +    __m256d          jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
 +    int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
 +    __m256d          jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
 +    __m256d          dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
 +    __m256d          dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
 +    __m256d          dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m128i          ewitab;
 +    __m256d          ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
 +    __m256d          beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
 +    real             *ewtab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    sh_ewald         = _mm256_set1_pd(fr->ic->sh_ewald);
 +    beta             = _mm256_set1_pd(fr->ic->ewaldcoeff);
 +    beta2            = _mm256_mul_pd(beta,beta);
 +    beta3            = _mm256_mul_pd(beta,beta2);
 +
 +    ewtab            = fr->ic->tabq_coul_F;
 +    ewtabscale       = _mm256_set1_pd(fr->ic->tabq_scale);
 +    ewtabhalfspace   = _mm256_set1_pd(0.5/fr->ic->tabq_scale);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +    jq0              = _mm256_set1_pd(charge[inr+0]);
 +    jq1              = _mm256_set1_pd(charge[inr+1]);
 +    jq2              = _mm256_set1_pd(charge[inr+2]);
 +    vdwjidx0A        = 2*vdwtype[inr+0];
 +    qq00             = _mm256_mul_pd(iq0,jq0);
 +    c6_00            = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A]);
 +    c12_00           = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A+1]);
 +    qq01             = _mm256_mul_pd(iq0,jq1);
 +    qq02             = _mm256_mul_pd(iq0,jq2);
 +    qq10             = _mm256_mul_pd(iq1,jq0);
 +    qq11             = _mm256_mul_pd(iq1,jq1);
 +    qq12             = _mm256_mul_pd(iq1,jq2);
 +    qq20             = _mm256_mul_pd(iq2,jq0);
 +    qq21             = _mm256_mul_pd(iq2,jq1);
 +    qq22             = _mm256_mul_pd(iq2,jq2);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                              &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx01             = _mm256_sub_pd(ix0,jx1);
 +            dy01             = _mm256_sub_pd(iy0,jy1);
 +            dz01             = _mm256_sub_pd(iz0,jz1);
 +            dx02             = _mm256_sub_pd(ix0,jx2);
 +            dy02             = _mm256_sub_pd(iy0,jy2);
 +            dz02             = _mm256_sub_pd(iz0,jz2);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
 +            rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv01           = gmx_mm256_invsqrt_pd(rsq01);
 +            rinv02           = gmx_mm256_invsqrt_pd(rsq02);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq01         = _mm256_mul_pd(rinv01,rinv01);
 +            rinvsq02         = _mm256_mul_pd(rinv02,rinv02);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r00,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(_mm256_mul_pd(c12_00,rinvsix),c6_00),_mm256_mul_pd(rinvsix,rinvsq00));
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r01              = _mm256_mul_pd(rsq01,rinv01);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r01,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq01,rinv01),_mm256_sub_pd(rinvsq01,felec));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx01);
 +            ty               = _mm256_mul_pd(fscal,dy01);
 +            tz               = _mm256_mul_pd(fscal,dz01);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r02              = _mm256_mul_pd(rsq02,rinv02);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r02,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq02,rinv02),_mm256_sub_pd(rinvsq02,felec));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx02);
 +            ty               = _mm256_mul_pd(fscal,dy02);
 +            tz               = _mm256_mul_pd(fscal,dz02);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r10,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r11              = _mm256_mul_pd(rsq11,rinv11);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r11,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq11,rinv11),_mm256_sub_pd(rinvsq11,felec));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r12              = _mm256_mul_pd(rsq12,rinv12);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r12,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq12,rinv12),_mm256_sub_pd(rinvsq12,felec));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r20,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r21              = _mm256_mul_pd(rsq21,rinv21);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r21,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq21,rinv21),_mm256_sub_pd(rinvsq21,felec));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r22              = _mm256_mul_pd(rsq22,rinv22);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r22,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq22,rinv22),_mm256_sub_pd(rinvsq22,felec));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
 +
 +            /* Inner loop uses 331 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                              &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx01             = _mm256_sub_pd(ix0,jx1);
 +            dy01             = _mm256_sub_pd(iy0,jy1);
 +            dz01             = _mm256_sub_pd(iz0,jz1);
 +            dx02             = _mm256_sub_pd(ix0,jx2);
 +            dy02             = _mm256_sub_pd(iy0,jy2);
 +            dz02             = _mm256_sub_pd(iz0,jz2);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
 +            rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv01           = gmx_mm256_invsqrt_pd(rsq01);
 +            rinv02           = gmx_mm256_invsqrt_pd(rsq02);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq01         = _mm256_mul_pd(rinv01,rinv01);
 +            rinvsq02         = _mm256_mul_pd(rinv02,rinv02);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r00,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(_mm256_mul_pd(c12_00,rinvsix),c6_00),_mm256_mul_pd(rinvsix,rinvsq00));
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r01              = _mm256_mul_pd(rsq01,rinv01);
 +            r01              = _mm256_andnot_pd(dummy_mask,r01);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r01,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq01,rinv01),_mm256_sub_pd(rinvsq01,felec));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx01);
 +            ty               = _mm256_mul_pd(fscal,dy01);
 +            tz               = _mm256_mul_pd(fscal,dz01);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r02              = _mm256_mul_pd(rsq02,rinv02);
 +            r02              = _mm256_andnot_pd(dummy_mask,r02);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r02,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq02,rinv02),_mm256_sub_pd(rinvsq02,felec));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx02);
 +            ty               = _mm256_mul_pd(fscal,dy02);
 +            tz               = _mm256_mul_pd(fscal,dz02);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +            r10              = _mm256_andnot_pd(dummy_mask,r10);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r10,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r11              = _mm256_mul_pd(rsq11,rinv11);
 +            r11              = _mm256_andnot_pd(dummy_mask,r11);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r11,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq11,rinv11),_mm256_sub_pd(rinvsq11,felec));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r12              = _mm256_mul_pd(rsq12,rinv12);
 +            r12              = _mm256_andnot_pd(dummy_mask,r12);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r12,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq12,rinv12),_mm256_sub_pd(rinvsq12,felec));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +            r20              = _mm256_andnot_pd(dummy_mask,r20);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r20,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r21              = _mm256_mul_pd(rsq21,rinv21);
 +            r21              = _mm256_andnot_pd(dummy_mask,r21);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r21,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq21,rinv21),_mm256_sub_pd(rinvsq21,felec));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r22              = _mm256_mul_pd(rsq22,rinv22);
 +            r22              = _mm256_andnot_pd(dummy_mask,r22);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r22,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq22,rinv22),_mm256_sub_pd(rinvsq22,felec));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
 +
 +            /* Inner loop uses 340 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 18 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3W3_F,outeriter*18 + inneriter*340);
 +}
index aad46e99e55a7af1f98eaf86628bf567d844d013,0000000000000000000000000000000000000000..b4aaf0ab0f3932b64e8a3106f83d9da3e3bdaebd
mode 100644,000000..100644
--- /dev/null
@@@ -1,1332 -1,0 +1,1332 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/*
 + * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2001-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the Free
 + * Software Foundation; either version 2 of the License, or (at your option) any
 + * later version.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website.
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecEw_VdwLJ_GeomW4P1_VF_avx_256_double
 + * Electrostatics interaction: Ewald
 + * VdW interaction:            LennardJones
 + * Geometry:                   Water4-Particle
 + * Calculate force/pot:        PotentialAndForce
 + */
 +void
 +nb_kernel_ElecEw_VdwLJ_GeomW4P1_VF_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    real *           vdwioffsetptr3;
 +    __m256d          ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          dx30,dy30,dz30,rsq30,rinv30,rinvsq30,r30,qq30,c6_30,c12_30;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m128i          ewitab;
 +    __m256d          ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
 +    __m256d          beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
 +    real             *ewtab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    sh_ewald         = _mm256_set1_pd(fr->ic->sh_ewald);
 +    beta             = _mm256_set1_pd(fr->ic->ewaldcoeff);
 +    beta2            = _mm256_mul_pd(beta,beta);
 +    beta3            = _mm256_mul_pd(beta,beta2);
 +
 +    ewtab            = fr->ic->tabq_coul_FDV0;
 +    ewtabscale       = _mm256_set1_pd(fr->ic->tabq_scale);
 +    ewtabhalfspace   = _mm256_set1_pd(0.5/fr->ic->tabq_scale);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    iq3              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+3]));
 +    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_4rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +        fix3             = _mm256_setzero_pd();
 +        fiy3             = _mm256_setzero_pd();
 +        fiz3             = _mm256_setzero_pd();
 +
 +        /* Reset potential sums */
 +        velecsum         = _mm256_setzero_pd();
 +        vvdwsum          = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx30             = _mm256_sub_pd(ix3,jx0);
 +            dy30             = _mm256_sub_pd(iy3,jy0);
 +            dz30             = _mm256_sub_pd(iz3,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq30            = gmx_mm256_calc_rsq_pd(dx30,dy30,dz30);
 +
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv30           = gmx_mm256_invsqrt_pd(rsq30);
 +
 +            rinvsq00         = gmx_mm256_inv_pd(rsq00);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq30         = _mm256_mul_pd(rinv30,rinv30);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = fvdw;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r10,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(rinv10,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r20,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(rinv20,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r30              = _mm256_mul_pd(rsq30,rinv30);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq30             = _mm256_mul_pd(iq3,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r30,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq30,_mm256_sub_pd(rinv30,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq30,rinv30),_mm256_sub_pd(rinvsq30,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx30);
 +            ty               = _mm256_mul_pd(fscal,dy30);
 +            tz               = _mm256_mul_pd(fscal,dz30);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 158 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx30             = _mm256_sub_pd(ix3,jx0);
 +            dy30             = _mm256_sub_pd(iy3,jy0);
 +            dz30             = _mm256_sub_pd(iz3,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq30            = gmx_mm256_calc_rsq_pd(dx30,dy30,dz30);
 +
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv30           = gmx_mm256_invsqrt_pd(rsq30);
 +
 +            rinvsq00         = gmx_mm256_inv_pd(rsq00);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq30         = _mm256_mul_pd(rinv30,rinv30);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            vvdw             = _mm256_andnot_pd(dummy_mask,vvdw);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = fvdw;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +            r10              = _mm256_andnot_pd(dummy_mask,r10);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r10,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(rinv10,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +            r20              = _mm256_andnot_pd(dummy_mask,r20);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r20,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(rinv20,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r30              = _mm256_mul_pd(rsq30,rinv30);
 +            r30              = _mm256_andnot_pd(dummy_mask,r30);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq30             = _mm256_mul_pd(iq3,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r30,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq30,_mm256_sub_pd(rinv30,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq30,rinv30),_mm256_sub_pd(rinvsq30,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx30);
 +            ty               = _mm256_mul_pd(fscal,dy30);
 +            tz               = _mm256_mul_pd(fscal,dz30);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 161 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_4atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
 +        gmx_mm256_update_1pot_pd(vvdwsum,kernel_data->energygrp_vdw+ggid);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 26 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4_VF,outeriter*26 + inneriter*161);
 +}
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecEw_VdwLJ_GeomW4P1_F_avx_256_double
 + * Electrostatics interaction: Ewald
 + * VdW interaction:            LennardJones
 + * Geometry:                   Water4-Particle
 + * Calculate force/pot:        Force
 + */
 +void
 +nb_kernel_ElecEw_VdwLJ_GeomW4P1_F_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    real *           vdwioffsetptr3;
 +    __m256d          ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          dx30,dy30,dz30,rsq30,rinv30,rinvsq30,r30,qq30,c6_30,c12_30;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m128i          ewitab;
 +    __m256d          ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
 +    __m256d          beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
 +    real             *ewtab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    sh_ewald         = _mm256_set1_pd(fr->ic->sh_ewald);
 +    beta             = _mm256_set1_pd(fr->ic->ewaldcoeff);
 +    beta2            = _mm256_mul_pd(beta,beta);
 +    beta3            = _mm256_mul_pd(beta,beta2);
 +
 +    ewtab            = fr->ic->tabq_coul_F;
 +    ewtabscale       = _mm256_set1_pd(fr->ic->tabq_scale);
 +    ewtabhalfspace   = _mm256_set1_pd(0.5/fr->ic->tabq_scale);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    iq3              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+3]));
 +    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_4rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +        fix3             = _mm256_setzero_pd();
 +        fiy3             = _mm256_setzero_pd();
 +        fiz3             = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx30             = _mm256_sub_pd(ix3,jx0);
 +            dy30             = _mm256_sub_pd(iy3,jy0);
 +            dz30             = _mm256_sub_pd(iz3,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq30            = gmx_mm256_calc_rsq_pd(dx30,dy30,dz30);
 +
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv30           = gmx_mm256_invsqrt_pd(rsq30);
 +
 +            rinvsq00         = gmx_mm256_inv_pd(rsq00);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq30         = _mm256_mul_pd(rinv30,rinv30);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(_mm256_mul_pd(c12_00,rinvsix),c6_00),_mm256_mul_pd(rinvsix,rinvsq00));
 +
 +            fscal            = fvdw;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r10,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r20,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r30              = _mm256_mul_pd(rsq30,rinv30);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq30             = _mm256_mul_pd(iq3,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r30,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq30,rinv30),_mm256_sub_pd(rinvsq30,felec));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx30);
 +            ty               = _mm256_mul_pd(fscal,dy30);
 +            tz               = _mm256_mul_pd(fscal,dz30);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 138 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx30             = _mm256_sub_pd(ix3,jx0);
 +            dy30             = _mm256_sub_pd(iy3,jy0);
 +            dz30             = _mm256_sub_pd(iz3,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq30            = gmx_mm256_calc_rsq_pd(dx30,dy30,dz30);
 +
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv30           = gmx_mm256_invsqrt_pd(rsq30);
 +
 +            rinvsq00         = gmx_mm256_inv_pd(rsq00);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq30         = _mm256_mul_pd(rinv30,rinv30);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(_mm256_mul_pd(c12_00,rinvsix),c6_00),_mm256_mul_pd(rinvsix,rinvsq00));
 +
 +            fscal            = fvdw;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +            r10              = _mm256_andnot_pd(dummy_mask,r10);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r10,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +            r20              = _mm256_andnot_pd(dummy_mask,r20);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r20,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r30              = _mm256_mul_pd(rsq30,rinv30);
 +            r30              = _mm256_andnot_pd(dummy_mask,r30);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq30             = _mm256_mul_pd(iq3,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r30,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq30,rinv30),_mm256_sub_pd(rinvsq30,felec));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx30);
 +            ty               = _mm256_mul_pd(fscal,dy30);
 +            tz               = _mm256_mul_pd(fscal,dz30);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 141 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_4atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 24 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4_F,outeriter*24 + inneriter*141);
 +}
index 500c263a6202dc13bb412824a86cb67f2f94f6b3,0000000000000000000000000000000000000000..9378bb338760e9ca6201e3ec0b862ffd9989645f
mode 100644,000000..100644
--- /dev/null
@@@ -1,2438 -1,0 +1,2438 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/*
 + * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2001-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the Free
 + * Software Foundation; either version 2 of the License, or (at your option) any
 + * later version.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website.
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecEw_VdwLJ_GeomW4W4_VF_avx_256_double
 + * Electrostatics interaction: Ewald
 + * VdW interaction:            LennardJones
 + * Geometry:                   Water4-Water4
 + * Calculate force/pot:        PotentialAndForce
 + */
 +void
 +nb_kernel_ElecEw_VdwLJ_GeomW4W4_VF_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    real *           vdwioffsetptr3;
 +    __m256d          ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
 +    __m256d          jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
 +    int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
 +    __m256d          jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
 +    int              vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D;
 +    __m256d          jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
 +    __m256d          dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
 +    __m256d          dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
 +    __m256d          dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
 +    __m256d          dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
 +    __m256d          dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
 +    __m256d          dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
 +    __m256d          dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
 +    __m256d          dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m128i          ewitab;
 +    __m256d          ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
 +    __m256d          beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
 +    real             *ewtab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    sh_ewald         = _mm256_set1_pd(fr->ic->sh_ewald);
 +    beta             = _mm256_set1_pd(fr->ic->ewaldcoeff);
 +    beta2            = _mm256_mul_pd(beta,beta);
 +    beta3            = _mm256_mul_pd(beta,beta2);
 +
 +    ewtab            = fr->ic->tabq_coul_FDV0;
 +    ewtabscale       = _mm256_set1_pd(fr->ic->tabq_scale);
 +    ewtabhalfspace   = _mm256_set1_pd(0.5/fr->ic->tabq_scale);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    iq3              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+3]));
 +    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +    jq1              = _mm256_set1_pd(charge[inr+1]);
 +    jq2              = _mm256_set1_pd(charge[inr+2]);
 +    jq3              = _mm256_set1_pd(charge[inr+3]);
 +    vdwjidx0A        = 2*vdwtype[inr+0];
 +    c6_00            = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A]);
 +    c12_00           = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A+1]);
 +    qq11             = _mm256_mul_pd(iq1,jq1);
 +    qq12             = _mm256_mul_pd(iq1,jq2);
 +    qq13             = _mm256_mul_pd(iq1,jq3);
 +    qq21             = _mm256_mul_pd(iq2,jq1);
 +    qq22             = _mm256_mul_pd(iq2,jq2);
 +    qq23             = _mm256_mul_pd(iq2,jq3);
 +    qq31             = _mm256_mul_pd(iq3,jq1);
 +    qq32             = _mm256_mul_pd(iq3,jq2);
 +    qq33             = _mm256_mul_pd(iq3,jq3);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_4rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +        fix3             = _mm256_setzero_pd();
 +        fiy3             = _mm256_setzero_pd();
 +        fiz3             = _mm256_setzero_pd();
 +
 +        /* Reset potential sums */
 +        velecsum         = _mm256_setzero_pd();
 +        vvdwsum          = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_4rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
 +                                                 &jy2,&jz2,&jx3,&jy3,&jz3);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx13             = _mm256_sub_pd(ix1,jx3);
 +            dy13             = _mm256_sub_pd(iy1,jy3);
 +            dz13             = _mm256_sub_pd(iz1,jz3);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +            dx23             = _mm256_sub_pd(ix2,jx3);
 +            dy23             = _mm256_sub_pd(iy2,jy3);
 +            dz23             = _mm256_sub_pd(iz2,jz3);
 +            dx31             = _mm256_sub_pd(ix3,jx1);
 +            dy31             = _mm256_sub_pd(iy3,jy1);
 +            dz31             = _mm256_sub_pd(iz3,jz1);
 +            dx32             = _mm256_sub_pd(ix3,jx2);
 +            dy32             = _mm256_sub_pd(iy3,jy2);
 +            dz32             = _mm256_sub_pd(iz3,jz2);
 +            dx33             = _mm256_sub_pd(ix3,jx3);
 +            dy33             = _mm256_sub_pd(iy3,jy3);
 +            dz33             = _mm256_sub_pd(iz3,jz3);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq13            = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +            rsq23            = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
 +            rsq31            = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
 +            rsq32            = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
 +            rsq33            = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);
 +
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv13           = gmx_mm256_invsqrt_pd(rsq13);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +            rinv23           = gmx_mm256_invsqrt_pd(rsq23);
 +            rinv31           = gmx_mm256_invsqrt_pd(rsq31);
 +            rinv32           = gmx_mm256_invsqrt_pd(rsq32);
 +            rinv33           = gmx_mm256_invsqrt_pd(rsq33);
 +
 +            rinvsq00         = gmx_mm256_inv_pd(rsq00);
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq13         = _mm256_mul_pd(rinv13,rinv13);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +            rinvsq23         = _mm256_mul_pd(rinv23,rinv23);
 +            rinvsq31         = _mm256_mul_pd(rinv31,rinv31);
 +            rinvsq32         = _mm256_mul_pd(rinv32,rinv32);
 +            rinvsq33         = _mm256_mul_pd(rinv33,rinv33);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +            fjx3             = _mm256_setzero_pd();
 +            fjy3             = _mm256_setzero_pd();
 +            fjz3             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = fvdw;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r11              = _mm256_mul_pd(rsq11,rinv11);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r11,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq11,_mm256_sub_pd(rinv11,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq11,rinv11),_mm256_sub_pd(rinvsq11,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r12              = _mm256_mul_pd(rsq12,rinv12);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r12,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq12,_mm256_sub_pd(rinv12,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq12,rinv12),_mm256_sub_pd(rinvsq12,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r13              = _mm256_mul_pd(rsq13,rinv13);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r13,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq13,_mm256_sub_pd(rinv13,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq13,rinv13),_mm256_sub_pd(rinvsq13,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx13);
 +            ty               = _mm256_mul_pd(fscal,dy13);
 +            tz               = _mm256_mul_pd(fscal,dz13);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r21              = _mm256_mul_pd(rsq21,rinv21);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r21,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq21,_mm256_sub_pd(rinv21,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq21,rinv21),_mm256_sub_pd(rinvsq21,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r22              = _mm256_mul_pd(rsq22,rinv22);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r22,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq22,_mm256_sub_pd(rinv22,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq22,rinv22),_mm256_sub_pd(rinvsq22,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r23              = _mm256_mul_pd(rsq23,rinv23);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r23,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq23,_mm256_sub_pd(rinv23,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq23,rinv23),_mm256_sub_pd(rinvsq23,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx23);
 +            ty               = _mm256_mul_pd(fscal,dy23);
 +            tz               = _mm256_mul_pd(fscal,dz23);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r31              = _mm256_mul_pd(rsq31,rinv31);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r31,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq31,_mm256_sub_pd(rinv31,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq31,rinv31),_mm256_sub_pd(rinvsq31,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx31);
 +            ty               = _mm256_mul_pd(fscal,dy31);
 +            tz               = _mm256_mul_pd(fscal,dz31);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r32              = _mm256_mul_pd(rsq32,rinv32);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r32,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq32,_mm256_sub_pd(rinv32,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq32,rinv32),_mm256_sub_pd(rinvsq32,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx32);
 +            ty               = _mm256_mul_pd(fscal,dy32);
 +            tz               = _mm256_mul_pd(fscal,dz32);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r33              = _mm256_mul_pd(rsq33,rinv33);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r33,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq33,_mm256_sub_pd(rinv33,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq33,rinv33),_mm256_sub_pd(rinvsq33,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx33);
 +            ty               = _mm256_mul_pd(fscal,dy33);
 +            tz               = _mm256_mul_pd(fscal,dz33);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_4rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
 +                                                      fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
 +
 +            /* Inner loop uses 404 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_4rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
 +                                                 &jy2,&jz2,&jx3,&jy3,&jz3);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx13             = _mm256_sub_pd(ix1,jx3);
 +            dy13             = _mm256_sub_pd(iy1,jy3);
 +            dz13             = _mm256_sub_pd(iz1,jz3);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +            dx23             = _mm256_sub_pd(ix2,jx3);
 +            dy23             = _mm256_sub_pd(iy2,jy3);
 +            dz23             = _mm256_sub_pd(iz2,jz3);
 +            dx31             = _mm256_sub_pd(ix3,jx1);
 +            dy31             = _mm256_sub_pd(iy3,jy1);
 +            dz31             = _mm256_sub_pd(iz3,jz1);
 +            dx32             = _mm256_sub_pd(ix3,jx2);
 +            dy32             = _mm256_sub_pd(iy3,jy2);
 +            dz32             = _mm256_sub_pd(iz3,jz2);
 +            dx33             = _mm256_sub_pd(ix3,jx3);
 +            dy33             = _mm256_sub_pd(iy3,jy3);
 +            dz33             = _mm256_sub_pd(iz3,jz3);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq13            = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +            rsq23            = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
 +            rsq31            = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
 +            rsq32            = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
 +            rsq33            = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);
 +
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv13           = gmx_mm256_invsqrt_pd(rsq13);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +            rinv23           = gmx_mm256_invsqrt_pd(rsq23);
 +            rinv31           = gmx_mm256_invsqrt_pd(rsq31);
 +            rinv32           = gmx_mm256_invsqrt_pd(rsq32);
 +            rinv33           = gmx_mm256_invsqrt_pd(rsq33);
 +
 +            rinvsq00         = gmx_mm256_inv_pd(rsq00);
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq13         = _mm256_mul_pd(rinv13,rinv13);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +            rinvsq23         = _mm256_mul_pd(rinv23,rinv23);
 +            rinvsq31         = _mm256_mul_pd(rinv31,rinv31);
 +            rinvsq32         = _mm256_mul_pd(rinv32,rinv32);
 +            rinvsq33         = _mm256_mul_pd(rinv33,rinv33);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +            fjx3             = _mm256_setzero_pd();
 +            fjy3             = _mm256_setzero_pd();
 +            fjz3             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            vvdw             = _mm256_andnot_pd(dummy_mask,vvdw);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = fvdw;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r11              = _mm256_mul_pd(rsq11,rinv11);
 +            r11              = _mm256_andnot_pd(dummy_mask,r11);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r11,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq11,_mm256_sub_pd(rinv11,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq11,rinv11),_mm256_sub_pd(rinvsq11,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r12              = _mm256_mul_pd(rsq12,rinv12);
 +            r12              = _mm256_andnot_pd(dummy_mask,r12);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r12,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq12,_mm256_sub_pd(rinv12,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq12,rinv12),_mm256_sub_pd(rinvsq12,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r13              = _mm256_mul_pd(rsq13,rinv13);
 +            r13              = _mm256_andnot_pd(dummy_mask,r13);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r13,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq13,_mm256_sub_pd(rinv13,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq13,rinv13),_mm256_sub_pd(rinvsq13,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx13);
 +            ty               = _mm256_mul_pd(fscal,dy13);
 +            tz               = _mm256_mul_pd(fscal,dz13);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r21              = _mm256_mul_pd(rsq21,rinv21);
 +            r21              = _mm256_andnot_pd(dummy_mask,r21);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r21,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq21,_mm256_sub_pd(rinv21,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq21,rinv21),_mm256_sub_pd(rinvsq21,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r22              = _mm256_mul_pd(rsq22,rinv22);
 +            r22              = _mm256_andnot_pd(dummy_mask,r22);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r22,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq22,_mm256_sub_pd(rinv22,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq22,rinv22),_mm256_sub_pd(rinvsq22,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r23              = _mm256_mul_pd(rsq23,rinv23);
 +            r23              = _mm256_andnot_pd(dummy_mask,r23);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r23,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq23,_mm256_sub_pd(rinv23,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq23,rinv23),_mm256_sub_pd(rinvsq23,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx23);
 +            ty               = _mm256_mul_pd(fscal,dy23);
 +            tz               = _mm256_mul_pd(fscal,dz23);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r31              = _mm256_mul_pd(rsq31,rinv31);
 +            r31              = _mm256_andnot_pd(dummy_mask,r31);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r31,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq31,_mm256_sub_pd(rinv31,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq31,rinv31),_mm256_sub_pd(rinvsq31,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx31);
 +            ty               = _mm256_mul_pd(fscal,dy31);
 +            tz               = _mm256_mul_pd(fscal,dz31);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r32              = _mm256_mul_pd(rsq32,rinv32);
 +            r32              = _mm256_andnot_pd(dummy_mask,r32);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r32,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq32,_mm256_sub_pd(rinv32,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq32,rinv32),_mm256_sub_pd(rinvsq32,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx32);
 +            ty               = _mm256_mul_pd(fscal,dy32);
 +            tz               = _mm256_mul_pd(fscal,dz32);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r33              = _mm256_mul_pd(rsq33,rinv33);
 +            r33              = _mm256_andnot_pd(dummy_mask,r33);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r33,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq33,_mm256_sub_pd(rinv33,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq33,rinv33),_mm256_sub_pd(rinvsq33,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx33);
 +            ty               = _mm256_mul_pd(fscal,dy33);
 +            tz               = _mm256_mul_pd(fscal,dz33);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_4rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
 +                                                      fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
 +
 +            /* Inner loop uses 413 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_4atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
 +        gmx_mm256_update_1pot_pd(vvdwsum,kernel_data->energygrp_vdw+ggid);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 26 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4W4_VF,outeriter*26 + inneriter*413);
 +}
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecEw_VdwLJ_GeomW4W4_F_avx_256_double
 + * Electrostatics interaction: Ewald
 + * VdW interaction:            LennardJones
 + * Geometry:                   Water4-Water4
 + * Calculate force/pot:        Force
 + */
 +void
 +nb_kernel_ElecEw_VdwLJ_GeomW4W4_F_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    real *           vdwioffsetptr3;
 +    __m256d          ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
 +    __m256d          jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
 +    int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
 +    __m256d          jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
 +    int              vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D;
 +    __m256d          jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
 +    __m256d          dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
 +    __m256d          dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
 +    __m256d          dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
 +    __m256d          dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
 +    __m256d          dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
 +    __m256d          dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
 +    __m256d          dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
 +    __m256d          dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m128i          ewitab;
 +    __m256d          ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
 +    __m256d          beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
 +    real             *ewtab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    sh_ewald         = _mm256_set1_pd(fr->ic->sh_ewald);
 +    beta             = _mm256_set1_pd(fr->ic->ewaldcoeff);
 +    beta2            = _mm256_mul_pd(beta,beta);
 +    beta3            = _mm256_mul_pd(beta,beta2);
 +
 +    ewtab            = fr->ic->tabq_coul_F;
 +    ewtabscale       = _mm256_set1_pd(fr->ic->tabq_scale);
 +    ewtabhalfspace   = _mm256_set1_pd(0.5/fr->ic->tabq_scale);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    iq3              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+3]));
 +    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +    jq1              = _mm256_set1_pd(charge[inr+1]);
 +    jq2              = _mm256_set1_pd(charge[inr+2]);
 +    jq3              = _mm256_set1_pd(charge[inr+3]);
 +    vdwjidx0A        = 2*vdwtype[inr+0];
 +    c6_00            = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A]);
 +    c12_00           = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A+1]);
 +    qq11             = _mm256_mul_pd(iq1,jq1);
 +    qq12             = _mm256_mul_pd(iq1,jq2);
 +    qq13             = _mm256_mul_pd(iq1,jq3);
 +    qq21             = _mm256_mul_pd(iq2,jq1);
 +    qq22             = _mm256_mul_pd(iq2,jq2);
 +    qq23             = _mm256_mul_pd(iq2,jq3);
 +    qq31             = _mm256_mul_pd(iq3,jq1);
 +    qq32             = _mm256_mul_pd(iq3,jq2);
 +    qq33             = _mm256_mul_pd(iq3,jq3);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_4rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +        fix3             = _mm256_setzero_pd();
 +        fiy3             = _mm256_setzero_pd();
 +        fiz3             = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_4rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
 +                                                 &jy2,&jz2,&jx3,&jy3,&jz3);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx13             = _mm256_sub_pd(ix1,jx3);
 +            dy13             = _mm256_sub_pd(iy1,jy3);
 +            dz13             = _mm256_sub_pd(iz1,jz3);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +            dx23             = _mm256_sub_pd(ix2,jx3);
 +            dy23             = _mm256_sub_pd(iy2,jy3);
 +            dz23             = _mm256_sub_pd(iz2,jz3);
 +            dx31             = _mm256_sub_pd(ix3,jx1);
 +            dy31             = _mm256_sub_pd(iy3,jy1);
 +            dz31             = _mm256_sub_pd(iz3,jz1);
 +            dx32             = _mm256_sub_pd(ix3,jx2);
 +            dy32             = _mm256_sub_pd(iy3,jy2);
 +            dz32             = _mm256_sub_pd(iz3,jz2);
 +            dx33             = _mm256_sub_pd(ix3,jx3);
 +            dy33             = _mm256_sub_pd(iy3,jy3);
 +            dz33             = _mm256_sub_pd(iz3,jz3);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq13            = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +            rsq23            = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
 +            rsq31            = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
 +            rsq32            = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
 +            rsq33            = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);
 +
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv13           = gmx_mm256_invsqrt_pd(rsq13);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +            rinv23           = gmx_mm256_invsqrt_pd(rsq23);
 +            rinv31           = gmx_mm256_invsqrt_pd(rsq31);
 +            rinv32           = gmx_mm256_invsqrt_pd(rsq32);
 +            rinv33           = gmx_mm256_invsqrt_pd(rsq33);
 +
 +            rinvsq00         = gmx_mm256_inv_pd(rsq00);
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq13         = _mm256_mul_pd(rinv13,rinv13);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +            rinvsq23         = _mm256_mul_pd(rinv23,rinv23);
 +            rinvsq31         = _mm256_mul_pd(rinv31,rinv31);
 +            rinvsq32         = _mm256_mul_pd(rinv32,rinv32);
 +            rinvsq33         = _mm256_mul_pd(rinv33,rinv33);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +            fjx3             = _mm256_setzero_pd();
 +            fjy3             = _mm256_setzero_pd();
 +            fjz3             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(_mm256_mul_pd(c12_00,rinvsix),c6_00),_mm256_mul_pd(rinvsix,rinvsq00));
 +
 +            fscal            = fvdw;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r11              = _mm256_mul_pd(rsq11,rinv11);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r11,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq11,rinv11),_mm256_sub_pd(rinvsq11,felec));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r12              = _mm256_mul_pd(rsq12,rinv12);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r12,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq12,rinv12),_mm256_sub_pd(rinvsq12,felec));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r13              = _mm256_mul_pd(rsq13,rinv13);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r13,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq13,rinv13),_mm256_sub_pd(rinvsq13,felec));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx13);
 +            ty               = _mm256_mul_pd(fscal,dy13);
 +            tz               = _mm256_mul_pd(fscal,dz13);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r21              = _mm256_mul_pd(rsq21,rinv21);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r21,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq21,rinv21),_mm256_sub_pd(rinvsq21,felec));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r22              = _mm256_mul_pd(rsq22,rinv22);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r22,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq22,rinv22),_mm256_sub_pd(rinvsq22,felec));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r23              = _mm256_mul_pd(rsq23,rinv23);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r23,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq23,rinv23),_mm256_sub_pd(rinvsq23,felec));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx23);
 +            ty               = _mm256_mul_pd(fscal,dy23);
 +            tz               = _mm256_mul_pd(fscal,dz23);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r31              = _mm256_mul_pd(rsq31,rinv31);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r31,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq31,rinv31),_mm256_sub_pd(rinvsq31,felec));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx31);
 +            ty               = _mm256_mul_pd(fscal,dy31);
 +            tz               = _mm256_mul_pd(fscal,dz31);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r32              = _mm256_mul_pd(rsq32,rinv32);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r32,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq32,rinv32),_mm256_sub_pd(rinvsq32,felec));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx32);
 +            ty               = _mm256_mul_pd(fscal,dy32);
 +            tz               = _mm256_mul_pd(fscal,dz32);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r33              = _mm256_mul_pd(rsq33,rinv33);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r33,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq33,rinv33),_mm256_sub_pd(rinvsq33,felec));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx33);
 +            ty               = _mm256_mul_pd(fscal,dy33);
 +            tz               = _mm256_mul_pd(fscal,dz33);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_4rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
 +                                                      fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
 +
 +            /* Inner loop uses 354 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_4rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
 +                                                 &jy2,&jz2,&jx3,&jy3,&jz3);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx13             = _mm256_sub_pd(ix1,jx3);
 +            dy13             = _mm256_sub_pd(iy1,jy3);
 +            dz13             = _mm256_sub_pd(iz1,jz3);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +            dx23             = _mm256_sub_pd(ix2,jx3);
 +            dy23             = _mm256_sub_pd(iy2,jy3);
 +            dz23             = _mm256_sub_pd(iz2,jz3);
 +            dx31             = _mm256_sub_pd(ix3,jx1);
 +            dy31             = _mm256_sub_pd(iy3,jy1);
 +            dz31             = _mm256_sub_pd(iz3,jz1);
 +            dx32             = _mm256_sub_pd(ix3,jx2);
 +            dy32             = _mm256_sub_pd(iy3,jy2);
 +            dz32             = _mm256_sub_pd(iz3,jz2);
 +            dx33             = _mm256_sub_pd(ix3,jx3);
 +            dy33             = _mm256_sub_pd(iy3,jy3);
 +            dz33             = _mm256_sub_pd(iz3,jz3);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq13            = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +            rsq23            = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
 +            rsq31            = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
 +            rsq32            = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
 +            rsq33            = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);
 +
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv13           = gmx_mm256_invsqrt_pd(rsq13);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +            rinv23           = gmx_mm256_invsqrt_pd(rsq23);
 +            rinv31           = gmx_mm256_invsqrt_pd(rsq31);
 +            rinv32           = gmx_mm256_invsqrt_pd(rsq32);
 +            rinv33           = gmx_mm256_invsqrt_pd(rsq33);
 +
 +            rinvsq00         = gmx_mm256_inv_pd(rsq00);
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq13         = _mm256_mul_pd(rinv13,rinv13);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +            rinvsq23         = _mm256_mul_pd(rinv23,rinv23);
 +            rinvsq31         = _mm256_mul_pd(rinv31,rinv31);
 +            rinvsq32         = _mm256_mul_pd(rinv32,rinv32);
 +            rinvsq33         = _mm256_mul_pd(rinv33,rinv33);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +            fjx3             = _mm256_setzero_pd();
 +            fjy3             = _mm256_setzero_pd();
 +            fjz3             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(_mm256_mul_pd(c12_00,rinvsix),c6_00),_mm256_mul_pd(rinvsix,rinvsq00));
 +
 +            fscal            = fvdw;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r11              = _mm256_mul_pd(rsq11,rinv11);
 +            r11              = _mm256_andnot_pd(dummy_mask,r11);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r11,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq11,rinv11),_mm256_sub_pd(rinvsq11,felec));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r12              = _mm256_mul_pd(rsq12,rinv12);
 +            r12              = _mm256_andnot_pd(dummy_mask,r12);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r12,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq12,rinv12),_mm256_sub_pd(rinvsq12,felec));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r13              = _mm256_mul_pd(rsq13,rinv13);
 +            r13              = _mm256_andnot_pd(dummy_mask,r13);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r13,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq13,rinv13),_mm256_sub_pd(rinvsq13,felec));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx13);
 +            ty               = _mm256_mul_pd(fscal,dy13);
 +            tz               = _mm256_mul_pd(fscal,dz13);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r21              = _mm256_mul_pd(rsq21,rinv21);
 +            r21              = _mm256_andnot_pd(dummy_mask,r21);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r21,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq21,rinv21),_mm256_sub_pd(rinvsq21,felec));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r22              = _mm256_mul_pd(rsq22,rinv22);
 +            r22              = _mm256_andnot_pd(dummy_mask,r22);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r22,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq22,rinv22),_mm256_sub_pd(rinvsq22,felec));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r23              = _mm256_mul_pd(rsq23,rinv23);
 +            r23              = _mm256_andnot_pd(dummy_mask,r23);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r23,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq23,rinv23),_mm256_sub_pd(rinvsq23,felec));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx23);
 +            ty               = _mm256_mul_pd(fscal,dy23);
 +            tz               = _mm256_mul_pd(fscal,dz23);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r31              = _mm256_mul_pd(rsq31,rinv31);
 +            r31              = _mm256_andnot_pd(dummy_mask,r31);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r31,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq31,rinv31),_mm256_sub_pd(rinvsq31,felec));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx31);
 +            ty               = _mm256_mul_pd(fscal,dy31);
 +            tz               = _mm256_mul_pd(fscal,dz31);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r32              = _mm256_mul_pd(rsq32,rinv32);
 +            r32              = _mm256_andnot_pd(dummy_mask,r32);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r32,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq32,rinv32),_mm256_sub_pd(rinvsq32,felec));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx32);
 +            ty               = _mm256_mul_pd(fscal,dy32);
 +            tz               = _mm256_mul_pd(fscal,dz32);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r33              = _mm256_mul_pd(rsq33,rinv33);
 +            r33              = _mm256_andnot_pd(dummy_mask,r33);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r33,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq33,rinv33),_mm256_sub_pd(rinvsq33,felec));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx33);
 +            ty               = _mm256_mul_pd(fscal,dy33);
 +            tz               = _mm256_mul_pd(fscal,dz33);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_4rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
 +                                                      fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
 +
 +            /* Inner loop uses 363 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_4atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 24 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4W4_F,outeriter*24 + inneriter*363);
 +}
index 4bd38f8a371bf6bdfe386522b268506edcc193db,0000000000000000000000000000000000000000..2fbb38d2a380bfece0643285e4801422b05442ef
mode 100644,000000..100644
--- /dev/null
@@@ -1,659 -1,0 +1,659 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/*
 + * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2001-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the Free
 + * Software Foundation; either version 2 of the License, or (at your option) any
 + * later version.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website.
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecEw_VdwNone_GeomP1P1_VF_avx_256_double
 + * Electrostatics interaction: Ewald
 + * VdW interaction:            None
 + * Geometry:                   Particle-Particle
 + * Calculate force/pot:        PotentialAndForce
 + */
 +void
 +nb_kernel_ElecEw_VdwNone_GeomP1P1_VF_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    /* scratch: dump target for force decrements of masked-out (dummy) j entries,
 +     * so the epilogue can always store four forces without branching per lane. */
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    __m128i          ewitab;
 +    __m256d          ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
 +    __m256d          beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
 +    real             *ewtab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +
 +    sh_ewald         = _mm256_set1_pd(fr->ic->sh_ewald);
 +    beta             = _mm256_set1_pd(fr->ic->ewaldcoeff);
 +    beta2            = _mm256_mul_pd(beta,beta);
 +    beta3            = _mm256_mul_pd(beta,beta2);
 +
 +    /* tabq_coul_FDV0: presumably quadruplets {F,D,V,0/Fn} per table point (hence the
 +     * ewitab<<2 scaling and four loads + transpose below) — set up outside this file; verify. */
 +    ewtab            = fr->ic->tabq_coul_FDV0;
 +    ewtabscale       = _mm256_set1_pd(fr->ic->tabq_scale);
 +    ewtabhalfspace   = _mm256_set1_pd(0.5/fr->ic->tabq_scale);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_1rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +
 +        /* Load parameters for i particles */
 +        iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +
 +        /* Reset potential sums */
 +        velecsum         = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        /* Runs while a full quadruplet of real j atoms remains: a negative
 +         * jjnr[jidx+3] signals padding and drops through to the masked epilogue. */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r00,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            /* Gather one 4-double table record per lane, then transpose so that
 +             * ewtabF/D/V/Fn each hold that quantity for all four lanes. */
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(rinv00,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
 +
 +            /* Inner loop uses 41 flops */
 +        }
 +
 +        /* Epilogue: 1-3 real j atoms remain, padded to a quadruplet with negative
 +         * indices; their contributions are zeroed with dummy_mask below. */
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
 +            /* NOTE(review): cast helper corrected to the _ps variant in this merge;
 +             * tmpmask0 is __m128 (float lanes) and is consumed by _mm_permute_ps below. */
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            /* Clamp dummy indices to 0 so the coordinate loads stay in bounds;
 +             * the bogus results are masked out before accumulation. */
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            /* Zero r for dummy lanes so the table index below stays valid */
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r00,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(rinv00,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            /* Dummy lanes write their (already zeroed) force into scratch */
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
 +
 +            /* Inner loop uses 42 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_1atom_swizzle_pd(fix0,fiy0,fiz0,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 8 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VF,outeriter*8 + inneriter*42);
 +}
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecEw_VdwNone_GeomP1P1_F_avx_256_double
 + * Electrostatics interaction: Ewald
 + * VdW interaction:            None
 + * Geometry:                   Particle-Particle
 + * Calculate force/pot:        Force
 + */
 +void
 +nb_kernel_ElecEw_VdwNone_GeomP1P1_F_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    __m128i          ewitab;
 +    __m256d          ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
 +    __m256d          beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
 +    real             *ewtab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +
 +    sh_ewald         = _mm256_set1_pd(fr->ic->sh_ewald);
 +    beta             = _mm256_set1_pd(fr->ic->ewaldcoeff);
 +    beta2            = _mm256_mul_pd(beta,beta);
 +    beta3            = _mm256_mul_pd(beta,beta2);
 +
 +    ewtab            = fr->ic->tabq_coul_F;
 +    ewtabscale       = _mm256_set1_pd(fr->ic->tabq_scale);
 +    ewtabhalfspace   = _mm256_set1_pd(0.5/fr->ic->tabq_scale);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_1rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +
 +        /* Load parameters for i particles */
 +        iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r00,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
 +
 +            /* Inner loop uses 36 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r00,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
 +
 +            /* Inner loop uses 37 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_1atom_swizzle_pd(fix0,fiy0,fiz0,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 7 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_F,outeriter*7 + inneriter*37);
 +}
index 862e4d39eda2b0d229df59d29619416f515e6645,0000000000000000000000000000000000000000..e1536bbb597b431fc51245c7c2f6675bc51a9ddf
mode 100644,000000..100644
--- /dev/null
@@@ -1,1117 -1,0 +1,1117 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/*
 + * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2001-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the Free
 + * Software Foundation; either version 2 of the License, or (at your option) any
 + * later version.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website.
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecEw_VdwNone_GeomW3P1_VF_avx_256_double
 + * Electrostatics interaction: Ewald
 + * VdW interaction:            None
 + * Geometry:                   Water3-Particle
 + * Calculate force/pot:        PotentialAndForce
 + */
 +void
 +nb_kernel_ElecEw_VdwNone_GeomW3P1_VF_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    __m128i          ewitab;
 +    __m256d          ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
 +    __m256d          beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
 +    real             *ewtab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +
 +    sh_ewald         = _mm256_set1_pd(fr->ic->sh_ewald);
 +    beta             = _mm256_set1_pd(fr->ic->ewaldcoeff);
 +    beta2            = _mm256_mul_pd(beta,beta);
 +    beta3            = _mm256_mul_pd(beta,beta2);
 +
 +    ewtab            = fr->ic->tabq_coul_FDV0;
 +    ewtabscale       = _mm256_set1_pd(fr->ic->tabq_scale);
 +    ewtabhalfspace   = _mm256_set1_pd(0.5/fr->ic->tabq_scale);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +
 +        /* Reset potential sums */
 +        velecsum         = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r00,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(rinv00,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r10,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(rinv10,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r20,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(rinv20,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 126 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r00,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(rinv00,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +            r10              = _mm256_andnot_pd(dummy_mask,r10);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r10,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(rinv10,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +            r20              = _mm256_andnot_pd(dummy_mask,r20);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r20,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(rinv20,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 129 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 19 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W3_VF,outeriter*19 + inneriter*129);
 +}
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecEw_VdwNone_GeomW3P1_F_avx_256_double
 + * Electrostatics interaction: Ewald
 + * VdW interaction:            None
 + * Geometry:                   Water3-Particle
 + * Calculate force/pot:        Force
 + */
 +void
 +nb_kernel_ElecEw_VdwNone_GeomW3P1_F_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    __m128i          ewitab;
 +    __m256d          ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
 +    __m256d          beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
 +    real             *ewtab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    /* Treat the rvec (real[3]) coordinate/force arrays as flat real arrays,
 +     * so offsets computed as DIM*index address them directly.
 +     */
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +
 +    sh_ewald         = _mm256_set1_pd(fr->ic->sh_ewald);
 +    beta             = _mm256_set1_pd(fr->ic->ewaldcoeff);
 +    beta2            = _mm256_mul_pd(beta,beta);
 +    beta3            = _mm256_mul_pd(beta,beta2);
 +
 +    ewtab            = fr->ic->tabq_coul_F;
 +    ewtabscale       = _mm256_set1_pd(fr->ic->tabq_scale);
 +    ewtabhalfspace   = _mm256_set1_pd(0.5/fr->ic->tabq_scale);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    /* Zeroed scratch buffer absorbs force stores for padded (dummy) j entries
 +     * in the masked epilogue loop, so stores can always be done 4-wide.
 +     */
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        /* Only full 4-wide iterations run here: a negative jnr marks list
 +         * padding, which is handled by the masked epilogue block below.
 +         */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r00,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            /* Linear interpolation in the tabulated Ewald force: (1-eps)*F + eps*Fn */
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r10,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r20,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 111 flops */
 +        }
 +
 +        /* Epilogue: process the final 1-3 real j atoms of the list, masking
 +         * out dummy (negative-index) padding entries.
 +         */
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r00,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +            r10              = _mm256_andnot_pd(dummy_mask,r10);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r10,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +            r20              = _mm256_andnot_pd(dummy_mask,r20);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r20,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /* Dummy entries store their (zeroed) forces into scratch, not f */
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 114 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 18 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W3_F,outeriter*18 + inneriter*114);
 +}
index 296f26c87f26cd9ddc7e5edc52b0d5d5f4522f5c,0000000000000000000000000000000000000000..ef4dea7cd8bd965308665294642cf83a0f8bcd80
mode 100644,000000..100644
--- /dev/null
@@@ -1,2237 -1,0 +1,2237 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/*
 + * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2001-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the Free
 + * Software Foundation; either version 2 of the License, or (at your option) any
 + * later version.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website.
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecEw_VdwNone_GeomW3W3_VF_avx_256_double
 + * Electrostatics interaction: Ewald
 + * VdW interaction:            None
 + * Geometry:                   Water3-Water3
 + * Calculate force/pot:        PotentialAndForce
 + */
 +void
 +nb_kernel_ElecEw_VdwNone_GeomW3W3_VF_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
 +    __m256d          jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
 +    int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
 +    __m256d          jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
 +    __m256d          dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
 +    __m256d          dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
 +    __m256d          dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    __m128i          ewitab;
 +    __m256d          ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
 +    __m256d          beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
 +    real             *ewtab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +
 +    sh_ewald         = _mm256_set1_pd(fr->ic->sh_ewald);
 +    beta             = _mm256_set1_pd(fr->ic->ewaldcoeff);
 +    beta2            = _mm256_mul_pd(beta,beta);
 +    beta3            = _mm256_mul_pd(beta,beta2);
 +
 +    ewtab            = fr->ic->tabq_coul_FDV0;
 +    ewtabscale       = _mm256_set1_pd(fr->ic->tabq_scale);
 +    ewtabhalfspace   = _mm256_set1_pd(0.5/fr->ic->tabq_scale);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +
 +    jq0              = _mm256_set1_pd(charge[inr+0]);
 +    jq1              = _mm256_set1_pd(charge[inr+1]);
 +    jq2              = _mm256_set1_pd(charge[inr+2]);
 +    qq00             = _mm256_mul_pd(iq0,jq0);
 +    qq01             = _mm256_mul_pd(iq0,jq1);
 +    qq02             = _mm256_mul_pd(iq0,jq2);
 +    qq10             = _mm256_mul_pd(iq1,jq0);
 +    qq11             = _mm256_mul_pd(iq1,jq1);
 +    qq12             = _mm256_mul_pd(iq1,jq2);
 +    qq20             = _mm256_mul_pd(iq2,jq0);
 +    qq21             = _mm256_mul_pd(iq2,jq1);
 +    qq22             = _mm256_mul_pd(iq2,jq2);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +
 +        /* Reset potential sums */
 +        velecsum         = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                              &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx01             = _mm256_sub_pd(ix0,jx1);
 +            dy01             = _mm256_sub_pd(iy0,jy1);
 +            dz01             = _mm256_sub_pd(iz0,jz1);
 +            dx02             = _mm256_sub_pd(ix0,jx2);
 +            dy02             = _mm256_sub_pd(iy0,jy2);
 +            dz02             = _mm256_sub_pd(iz0,jz2);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
 +            rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv01           = gmx_mm256_invsqrt_pd(rsq01);
 +            rinv02           = gmx_mm256_invsqrt_pd(rsq02);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq01         = _mm256_mul_pd(rinv01,rinv01);
 +            rinvsq02         = _mm256_mul_pd(rinv02,rinv02);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r00,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(rinv00,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r01              = _mm256_mul_pd(rsq01,rinv01);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r01,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq01,_mm256_sub_pd(rinv01,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq01,rinv01),_mm256_sub_pd(rinvsq01,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx01);
 +            ty               = _mm256_mul_pd(fscal,dy01);
 +            tz               = _mm256_mul_pd(fscal,dz01);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r02              = _mm256_mul_pd(rsq02,rinv02);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r02,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq02,_mm256_sub_pd(rinv02,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq02,rinv02),_mm256_sub_pd(rinvsq02,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx02);
 +            ty               = _mm256_mul_pd(fscal,dy02);
 +            tz               = _mm256_mul_pd(fscal,dz02);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r10,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(rinv10,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r11              = _mm256_mul_pd(rsq11,rinv11);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r11,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq11,_mm256_sub_pd(rinv11,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq11,rinv11),_mm256_sub_pd(rinvsq11,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r12              = _mm256_mul_pd(rsq12,rinv12);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r12,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq12,_mm256_sub_pd(rinv12,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq12,rinv12),_mm256_sub_pd(rinvsq12,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r20,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(rinv20,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r21              = _mm256_mul_pd(rsq21,rinv21);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r21,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq21,_mm256_sub_pd(rinv21,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq21,rinv21),_mm256_sub_pd(rinvsq21,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r22              = _mm256_mul_pd(rsq22,rinv22);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r22,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq22,_mm256_sub_pd(rinv22,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq22,rinv22),_mm256_sub_pd(rinvsq22,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
 +
 +            /* Inner loop uses 369 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                              &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx01             = _mm256_sub_pd(ix0,jx1);
 +            dy01             = _mm256_sub_pd(iy0,jy1);
 +            dz01             = _mm256_sub_pd(iz0,jz1);
 +            dx02             = _mm256_sub_pd(ix0,jx2);
 +            dy02             = _mm256_sub_pd(iy0,jy2);
 +            dz02             = _mm256_sub_pd(iz0,jz2);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
 +            rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv01           = gmx_mm256_invsqrt_pd(rsq01);
 +            rinv02           = gmx_mm256_invsqrt_pd(rsq02);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq01         = _mm256_mul_pd(rinv01,rinv01);
 +            rinvsq02         = _mm256_mul_pd(rinv02,rinv02);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r00,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(rinv00,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r01              = _mm256_mul_pd(rsq01,rinv01);
 +            r01              = _mm256_andnot_pd(dummy_mask,r01);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r01,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq01,_mm256_sub_pd(rinv01,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq01,rinv01),_mm256_sub_pd(rinvsq01,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx01);
 +            ty               = _mm256_mul_pd(fscal,dy01);
 +            tz               = _mm256_mul_pd(fscal,dz01);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r02              = _mm256_mul_pd(rsq02,rinv02);
 +            r02              = _mm256_andnot_pd(dummy_mask,r02);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r02,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq02,_mm256_sub_pd(rinv02,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq02,rinv02),_mm256_sub_pd(rinvsq02,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx02);
 +            ty               = _mm256_mul_pd(fscal,dy02);
 +            tz               = _mm256_mul_pd(fscal,dz02);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +            r10              = _mm256_andnot_pd(dummy_mask,r10);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r10,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(rinv10,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r11              = _mm256_mul_pd(rsq11,rinv11);
 +            r11              = _mm256_andnot_pd(dummy_mask,r11);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r11,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq11,_mm256_sub_pd(rinv11,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq11,rinv11),_mm256_sub_pd(rinvsq11,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r12              = _mm256_mul_pd(rsq12,rinv12);
 +            r12              = _mm256_andnot_pd(dummy_mask,r12);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r12,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq12,_mm256_sub_pd(rinv12,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq12,rinv12),_mm256_sub_pd(rinvsq12,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +            r20              = _mm256_andnot_pd(dummy_mask,r20);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r20,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(rinv20,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r21              = _mm256_mul_pd(rsq21,rinv21);
 +            r21              = _mm256_andnot_pd(dummy_mask,r21);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r21,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq21,_mm256_sub_pd(rinv21,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq21,rinv21),_mm256_sub_pd(rinvsq21,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r22              = _mm256_mul_pd(rsq22,rinv22);
 +            r22              = _mm256_andnot_pd(dummy_mask,r22);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r22,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq22,_mm256_sub_pd(rinv22,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq22,rinv22),_mm256_sub_pd(rinvsq22,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
 +
 +            /* Inner loop uses 378 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 19 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W3W3_VF,outeriter*19 + inneriter*378);
 +}
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecEw_VdwNone_GeomW3W3_F_avx_256_double
 + * Electrostatics interaction: Ewald
 + * VdW interaction:            None
 + * Geometry:                   Water3-Water3
 + * Calculate force/pot:        Force
 + */
 +void
 +nb_kernel_ElecEw_VdwNone_GeomW3W3_F_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
 +    __m256d          jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
 +    int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
 +    __m256d          jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
 +    __m256d          dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
 +    __m256d          dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
 +    __m256d          dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    __m128i          ewitab;
 +    __m256d          ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
 +    __m256d          beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
 +    real             *ewtab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +
 +    sh_ewald         = _mm256_set1_pd(fr->ic->sh_ewald);
 +    beta             = _mm256_set1_pd(fr->ic->ewaldcoeff);
 +    beta2            = _mm256_mul_pd(beta,beta);
 +    beta3            = _mm256_mul_pd(beta,beta2);
 +
 +    ewtab            = fr->ic->tabq_coul_F;
 +    ewtabscale       = _mm256_set1_pd(fr->ic->tabq_scale);
 +    ewtabhalfspace   = _mm256_set1_pd(0.5/fr->ic->tabq_scale);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +
 +    jq0              = _mm256_set1_pd(charge[inr+0]);
 +    jq1              = _mm256_set1_pd(charge[inr+1]);
 +    jq2              = _mm256_set1_pd(charge[inr+2]);
 +    qq00             = _mm256_mul_pd(iq0,jq0);
 +    qq01             = _mm256_mul_pd(iq0,jq1);
 +    qq02             = _mm256_mul_pd(iq0,jq2);
 +    qq10             = _mm256_mul_pd(iq1,jq0);
 +    qq11             = _mm256_mul_pd(iq1,jq1);
 +    qq12             = _mm256_mul_pd(iq1,jq2);
 +    qq20             = _mm256_mul_pd(iq2,jq0);
 +    qq21             = _mm256_mul_pd(iq2,jq1);
 +    qq22             = _mm256_mul_pd(iq2,jq2);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                              &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx01             = _mm256_sub_pd(ix0,jx1);
 +            dy01             = _mm256_sub_pd(iy0,jy1);
 +            dz01             = _mm256_sub_pd(iz0,jz1);
 +            dx02             = _mm256_sub_pd(ix0,jx2);
 +            dy02             = _mm256_sub_pd(iy0,jy2);
 +            dz02             = _mm256_sub_pd(iz0,jz2);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
 +            rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv01           = gmx_mm256_invsqrt_pd(rsq01);
 +            rinv02           = gmx_mm256_invsqrt_pd(rsq02);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq01         = _mm256_mul_pd(rinv01,rinv01);
 +            rinvsq02         = _mm256_mul_pd(rinv02,rinv02);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r00,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r01              = _mm256_mul_pd(rsq01,rinv01);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r01,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq01,rinv01),_mm256_sub_pd(rinvsq01,felec));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx01);
 +            ty               = _mm256_mul_pd(fscal,dy01);
 +            tz               = _mm256_mul_pd(fscal,dz01);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r02              = _mm256_mul_pd(rsq02,rinv02);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r02,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq02,rinv02),_mm256_sub_pd(rinvsq02,felec));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx02);
 +            ty               = _mm256_mul_pd(fscal,dy02);
 +            tz               = _mm256_mul_pd(fscal,dz02);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r10,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r11              = _mm256_mul_pd(rsq11,rinv11);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r11,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq11,rinv11),_mm256_sub_pd(rinvsq11,felec));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r12              = _mm256_mul_pd(rsq12,rinv12);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r12,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq12,rinv12),_mm256_sub_pd(rinvsq12,felec));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r20,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r21              = _mm256_mul_pd(rsq21,rinv21);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r21,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq21,rinv21),_mm256_sub_pd(rinvsq21,felec));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r22              = _mm256_mul_pd(rsq22,rinv22);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r22,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq22,rinv22),_mm256_sub_pd(rinvsq22,felec));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
 +
 +            /* Inner loop uses 324 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                              &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx01             = _mm256_sub_pd(ix0,jx1);
 +            dy01             = _mm256_sub_pd(iy0,jy1);
 +            dz01             = _mm256_sub_pd(iz0,jz1);
 +            dx02             = _mm256_sub_pd(ix0,jx2);
 +            dy02             = _mm256_sub_pd(iy0,jy2);
 +            dz02             = _mm256_sub_pd(iz0,jz2);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
 +            rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv01           = gmx_mm256_invsqrt_pd(rsq01);
 +            rinv02           = gmx_mm256_invsqrt_pd(rsq02);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq01         = _mm256_mul_pd(rinv01,rinv01);
 +            rinvsq02         = _mm256_mul_pd(rinv02,rinv02);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r00,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r01              = _mm256_mul_pd(rsq01,rinv01);
 +            r01              = _mm256_andnot_pd(dummy_mask,r01);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r01,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq01,rinv01),_mm256_sub_pd(rinvsq01,felec));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx01);
 +            ty               = _mm256_mul_pd(fscal,dy01);
 +            tz               = _mm256_mul_pd(fscal,dz01);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r02              = _mm256_mul_pd(rsq02,rinv02);
 +            r02              = _mm256_andnot_pd(dummy_mask,r02);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r02,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq02,rinv02),_mm256_sub_pd(rinvsq02,felec));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx02);
 +            ty               = _mm256_mul_pd(fscal,dy02);
 +            tz               = _mm256_mul_pd(fscal,dz02);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +            r10              = _mm256_andnot_pd(dummy_mask,r10);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r10,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r11              = _mm256_mul_pd(rsq11,rinv11);
 +            r11              = _mm256_andnot_pd(dummy_mask,r11);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r11,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq11,rinv11),_mm256_sub_pd(rinvsq11,felec));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r12              = _mm256_mul_pd(rsq12,rinv12);
 +            r12              = _mm256_andnot_pd(dummy_mask,r12);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r12,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq12,rinv12),_mm256_sub_pd(rinvsq12,felec));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +            r20              = _mm256_andnot_pd(dummy_mask,r20);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r20,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r21              = _mm256_mul_pd(rsq21,rinv21);
 +            r21              = _mm256_andnot_pd(dummy_mask,r21);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r21,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq21,rinv21),_mm256_sub_pd(rinvsq21,felec));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r22              = _mm256_mul_pd(rsq22,rinv22);
 +            r22              = _mm256_andnot_pd(dummy_mask,r22);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r22,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq22,rinv22),_mm256_sub_pd(rinvsq22,felec));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
 +
 +            /* Inner loop uses 333 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 18 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W3W3_F,outeriter*18 + inneriter*333);
 +}
index ccc91c007bf0553b5bb45f3d4b1967d1ce9243eb,0000000000000000000000000000000000000000..db46c0416a8aba75429ce0924ed7bbff66d13660
mode 100644,000000..100644
--- /dev/null
@@@ -1,1117 -1,0 +1,1117 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/*
 + * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2001-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the Free
 + * Software Foundation; either version 2 of the License, or (at your option) any
 + * later version.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website.
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecEw_VdwNone_GeomW4P1_VF_avx_256_double
 + * Electrostatics interaction: Ewald
 + * VdW interaction:            None
 + * Geometry:                   Water4-Particle
 + * Calculate force/pot:        PotentialAndForce
 + */
 +void
 +nb_kernel_ElecEw_VdwNone_GeomW4P1_VF_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    real *           vdwioffsetptr3;
 +    __m256d          ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          dx30,dy30,dz30,rsq30,rinv30,rinvsq30,r30,qq30,c6_30,c12_30;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    __m128i          ewitab;
 +    __m256d          ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
 +    __m256d          beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
 +    real             *ewtab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +
 +    sh_ewald         = _mm256_set1_pd(fr->ic->sh_ewald);
 +    beta             = _mm256_set1_pd(fr->ic->ewaldcoeff);
 +    beta2            = _mm256_mul_pd(beta,beta);
 +    beta3            = _mm256_mul_pd(beta,beta2);
 +
 +    ewtab            = fr->ic->tabq_coul_FDV0;
 +    ewtabscale       = _mm256_set1_pd(fr->ic->tabq_scale);
 +    ewtabhalfspace   = _mm256_set1_pd(0.5/fr->ic->tabq_scale);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    iq3              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+3]));
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset+DIM,
 +                                                    &ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
 +
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +        fix3             = _mm256_setzero_pd();
 +        fiy3             = _mm256_setzero_pd();
 +        fiz3             = _mm256_setzero_pd();
 +
 +        /* Reset potential sums */
 +        velecsum         = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx30             = _mm256_sub_pd(ix3,jx0);
 +            dy30             = _mm256_sub_pd(iy3,jy0);
 +            dz30             = _mm256_sub_pd(iz3,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq30            = gmx_mm256_calc_rsq_pd(dx30,dy30,dz30);
 +
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv30           = gmx_mm256_invsqrt_pd(rsq30);
 +
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq30         = _mm256_mul_pd(rinv30,rinv30);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r10,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(rinv10,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r20,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(rinv20,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r30              = _mm256_mul_pd(rsq30,rinv30);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq30             = _mm256_mul_pd(iq3,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r30,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq30,_mm256_sub_pd(rinv30,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq30,rinv30),_mm256_sub_pd(rinvsq30,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx30);
 +            ty               = _mm256_mul_pd(fscal,dy30);
 +            tz               = _mm256_mul_pd(fscal,dz30);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 126 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx30             = _mm256_sub_pd(ix3,jx0);
 +            dy30             = _mm256_sub_pd(iy3,jy0);
 +            dz30             = _mm256_sub_pd(iz3,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq30            = gmx_mm256_calc_rsq_pd(dx30,dy30,dz30);
 +
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv30           = gmx_mm256_invsqrt_pd(rsq30);
 +
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq30         = _mm256_mul_pd(rinv30,rinv30);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +            r10              = _mm256_andnot_pd(dummy_mask,r10);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r10,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(rinv10,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +            r20              = _mm256_andnot_pd(dummy_mask,r20);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r20,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(rinv20,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r30              = _mm256_mul_pd(rsq30,rinv30);
 +            r30              = _mm256_andnot_pd(dummy_mask,r30);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq30             = _mm256_mul_pd(iq3,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r30,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq30,_mm256_sub_pd(rinv30,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq30,rinv30),_mm256_sub_pd(rinvsq30,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx30);
 +            ty               = _mm256_mul_pd(fscal,dy30);
 +            tz               = _mm256_mul_pd(fscal,dz30);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 129 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
 +                                                 f+i_coord_offset+DIM,fshift+i_shift_offset);
 +
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 19 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W4_VF,outeriter*19 + inneriter*129);
 +}
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecEw_VdwNone_GeomW4P1_F_avx_256_double
 + * Electrostatics interaction: Ewald
 + * VdW interaction:            None
 + * Geometry:                   Water4-Particle
 + * Calculate force/pot:        Force
 + */
 +void
 +nb_kernel_ElecEw_VdwNone_GeomW4P1_F_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    real *           vdwioffsetptr3;
 +    __m256d          ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          dx30,dy30,dz30,rsq30,rinv30,rinvsq30,r30,qq30,c6_30,c12_30;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    __m128i          ewitab;
 +    __m256d          ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
 +    __m256d          beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
 +    real             *ewtab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +
 +    sh_ewald         = _mm256_set1_pd(fr->ic->sh_ewald);
 +    beta             = _mm256_set1_pd(fr->ic->ewaldcoeff);
 +    beta2            = _mm256_mul_pd(beta,beta);
 +    beta3            = _mm256_mul_pd(beta,beta2);
 +
 +    ewtab            = fr->ic->tabq_coul_F;
 +    ewtabscale       = _mm256_set1_pd(fr->ic->tabq_scale);
 +    ewtabhalfspace   = _mm256_set1_pd(0.5/fr->ic->tabq_scale);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    iq3              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+3]));
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset+DIM,
 +                                                    &ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
 +
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +        fix3             = _mm256_setzero_pd();
 +        fiy3             = _mm256_setzero_pd();
 +        fiz3             = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx30             = _mm256_sub_pd(ix3,jx0);
 +            dy30             = _mm256_sub_pd(iy3,jy0);
 +            dz30             = _mm256_sub_pd(iz3,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq30            = gmx_mm256_calc_rsq_pd(dx30,dy30,dz30);
 +
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv30           = gmx_mm256_invsqrt_pd(rsq30);
 +
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq30         = _mm256_mul_pd(rinv30,rinv30);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r10,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r20,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r30              = _mm256_mul_pd(rsq30,rinv30);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq30             = _mm256_mul_pd(iq3,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r30,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq30,rinv30),_mm256_sub_pd(rinvsq30,felec));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx30);
 +            ty               = _mm256_mul_pd(fscal,dy30);
 +            tz               = _mm256_mul_pd(fscal,dz30);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 111 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx30             = _mm256_sub_pd(ix3,jx0);
 +            dy30             = _mm256_sub_pd(iy3,jy0);
 +            dz30             = _mm256_sub_pd(iz3,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq30            = gmx_mm256_calc_rsq_pd(dx30,dy30,dz30);
 +
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv30           = gmx_mm256_invsqrt_pd(rsq30);
 +
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq30         = _mm256_mul_pd(rinv30,rinv30);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r10              = _mm256_mul_pd(rsq10,rinv10);
 +            r10              = _mm256_andnot_pd(dummy_mask,r10);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r10,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r20              = _mm256_mul_pd(rsq20,rinv20);
 +            r20              = _mm256_andnot_pd(dummy_mask,r20);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r20,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r30              = _mm256_mul_pd(rsq30,rinv30);
 +            r30              = _mm256_andnot_pd(dummy_mask,r30);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq30             = _mm256_mul_pd(iq3,jq0);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r30,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq30,rinv30),_mm256_sub_pd(rinvsq30,felec));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx30);
 +            ty               = _mm256_mul_pd(fscal,dy30);
 +            tz               = _mm256_mul_pd(fscal,dz30);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 114 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
 +                                                 f+i_coord_offset+DIM,fshift+i_shift_offset);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 18 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W4_F,outeriter*18 + inneriter*114);
 +}
index 7ee0e48366ef70bb66602a6f636af83b55730841,0000000000000000000000000000000000000000..b40eaed3b3d6ae01302ea347708c2f600f844328
mode 100644,000000..100644
--- /dev/null
@@@ -1,2237 -1,0 +1,2237 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/*
 + * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2001-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the Free
 + * Software Foundation; either version 2 of the License, or (at your option) any
 + * later version.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website.
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecEw_VdwNone_GeomW4W4_VF_avx_256_double
 + * Electrostatics interaction: Ewald
 + * VdW interaction:            None
 + * Geometry:                   Water4-Water4
 + * Calculate force/pot:        PotentialAndForce
 + */
 +void
 +nb_kernel_ElecEw_VdwNone_GeomW4W4_VF_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    real *           vdwioffsetptr3;
 +    __m256d          ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
 +    int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
 +    __m256d          jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
 +    int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
 +    __m256d          jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
 +    int              vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D;
 +    __m256d          jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
 +    __m256d          dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
 +    __m256d          dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
 +    __m256d          dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
 +    __m256d          dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
 +    __m256d          dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
 +    __m256d          dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
 +    __m256d          dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
 +    __m256d          dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
 +    __m256d          dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    __m128i          ewitab;
 +    __m256d          ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
 +    __m256d          beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
 +    real             *ewtab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +
 +    sh_ewald         = _mm256_set1_pd(fr->ic->sh_ewald);
 +    beta             = _mm256_set1_pd(fr->ic->ewaldcoeff);
 +    beta2            = _mm256_mul_pd(beta,beta);
 +    beta3            = _mm256_mul_pd(beta,beta2);
 +
 +    ewtab            = fr->ic->tabq_coul_FDV0;
 +    ewtabscale       = _mm256_set1_pd(fr->ic->tabq_scale);
 +    ewtabhalfspace   = _mm256_set1_pd(0.5/fr->ic->tabq_scale);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    iq3              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+3]));
 +
 +    jq1              = _mm256_set1_pd(charge[inr+1]);
 +    jq2              = _mm256_set1_pd(charge[inr+2]);
 +    jq3              = _mm256_set1_pd(charge[inr+3]);
 +    qq11             = _mm256_mul_pd(iq1,jq1);
 +    qq12             = _mm256_mul_pd(iq1,jq2);
 +    qq13             = _mm256_mul_pd(iq1,jq3);
 +    qq21             = _mm256_mul_pd(iq2,jq1);
 +    qq22             = _mm256_mul_pd(iq2,jq2);
 +    qq23             = _mm256_mul_pd(iq2,jq3);
 +    qq31             = _mm256_mul_pd(iq3,jq1);
 +    qq32             = _mm256_mul_pd(iq3,jq2);
 +    qq33             = _mm256_mul_pd(iq3,jq3);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset+DIM,
 +                                                    &ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
 +
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +        fix3             = _mm256_setzero_pd();
 +        fiy3             = _mm256_setzero_pd();
 +        fiz3             = _mm256_setzero_pd();
 +
 +        /* Reset potential sums */
 +        velecsum         = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA+DIM,x+j_coord_offsetB+DIM,
 +                                                 x+j_coord_offsetC+DIM,x+j_coord_offsetD+DIM,
 +                                                 &jx1,&jy1,&jz1,&jx2,&jy2,&jz2,&jx3,&jy3,&jz3);
 +
 +            /* Calculate displacement vector */
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx13             = _mm256_sub_pd(ix1,jx3);
 +            dy13             = _mm256_sub_pd(iy1,jy3);
 +            dz13             = _mm256_sub_pd(iz1,jz3);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +            dx23             = _mm256_sub_pd(ix2,jx3);
 +            dy23             = _mm256_sub_pd(iy2,jy3);
 +            dz23             = _mm256_sub_pd(iz2,jz3);
 +            dx31             = _mm256_sub_pd(ix3,jx1);
 +            dy31             = _mm256_sub_pd(iy3,jy1);
 +            dz31             = _mm256_sub_pd(iz3,jz1);
 +            dx32             = _mm256_sub_pd(ix3,jx2);
 +            dy32             = _mm256_sub_pd(iy3,jy2);
 +            dz32             = _mm256_sub_pd(iz3,jz2);
 +            dx33             = _mm256_sub_pd(ix3,jx3);
 +            dy33             = _mm256_sub_pd(iy3,jy3);
 +            dz33             = _mm256_sub_pd(iz3,jz3);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq13            = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +            rsq23            = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
 +            rsq31            = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
 +            rsq32            = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
 +            rsq33            = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);
 +
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv13           = gmx_mm256_invsqrt_pd(rsq13);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +            rinv23           = gmx_mm256_invsqrt_pd(rsq23);
 +            rinv31           = gmx_mm256_invsqrt_pd(rsq31);
 +            rinv32           = gmx_mm256_invsqrt_pd(rsq32);
 +            rinv33           = gmx_mm256_invsqrt_pd(rsq33);
 +
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq13         = _mm256_mul_pd(rinv13,rinv13);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +            rinvsq23         = _mm256_mul_pd(rinv23,rinv23);
 +            rinvsq31         = _mm256_mul_pd(rinv31,rinv31);
 +            rinvsq32         = _mm256_mul_pd(rinv32,rinv32);
 +            rinvsq33         = _mm256_mul_pd(rinv33,rinv33);
 +
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +            fjx3             = _mm256_setzero_pd();
 +            fjy3             = _mm256_setzero_pd();
 +            fjz3             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r11              = _mm256_mul_pd(rsq11,rinv11);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r11,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq11,_mm256_sub_pd(rinv11,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq11,rinv11),_mm256_sub_pd(rinvsq11,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r12              = _mm256_mul_pd(rsq12,rinv12);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r12,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq12,_mm256_sub_pd(rinv12,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq12,rinv12),_mm256_sub_pd(rinvsq12,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r13              = _mm256_mul_pd(rsq13,rinv13);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r13,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq13,_mm256_sub_pd(rinv13,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq13,rinv13),_mm256_sub_pd(rinvsq13,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx13);
 +            ty               = _mm256_mul_pd(fscal,dy13);
 +            tz               = _mm256_mul_pd(fscal,dz13);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r21              = _mm256_mul_pd(rsq21,rinv21);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r21,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq21,_mm256_sub_pd(rinv21,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq21,rinv21),_mm256_sub_pd(rinvsq21,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r22              = _mm256_mul_pd(rsq22,rinv22);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r22,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq22,_mm256_sub_pd(rinv22,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq22,rinv22),_mm256_sub_pd(rinvsq22,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r23              = _mm256_mul_pd(rsq23,rinv23);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r23,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq23,_mm256_sub_pd(rinv23,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq23,rinv23),_mm256_sub_pd(rinvsq23,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx23);
 +            ty               = _mm256_mul_pd(fscal,dy23);
 +            tz               = _mm256_mul_pd(fscal,dz23);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r31              = _mm256_mul_pd(rsq31,rinv31);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r31,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq31,_mm256_sub_pd(rinv31,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq31,rinv31),_mm256_sub_pd(rinvsq31,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx31);
 +            ty               = _mm256_mul_pd(fscal,dy31);
 +            tz               = _mm256_mul_pd(fscal,dz31);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r32              = _mm256_mul_pd(rsq32,rinv32);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r32,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq32,_mm256_sub_pd(rinv32,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq32,rinv32),_mm256_sub_pd(rinvsq32,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx32);
 +            ty               = _mm256_mul_pd(fscal,dy32);
 +            tz               = _mm256_mul_pd(fscal,dz32);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r33              = _mm256_mul_pd(rsq33,rinv33);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r33,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq33,_mm256_sub_pd(rinv33,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq33,rinv33),_mm256_sub_pd(rinvsq33,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx33);
 +            ty               = _mm256_mul_pd(fscal,dy33);
 +            tz               = _mm256_mul_pd(fscal,dz33);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA+DIM,fjptrB+DIM,fjptrC+DIM,fjptrD+DIM,
 +                                                      fjx1,fjy1,fjz1,fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
 +
 +            /* Inner loop uses 369 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA+DIM,x+j_coord_offsetB+DIM,
 +                                                 x+j_coord_offsetC+DIM,x+j_coord_offsetD+DIM,
 +                                                 &jx1,&jy1,&jz1,&jx2,&jy2,&jz2,&jx3,&jy3,&jz3);
 +
 +            /* Calculate displacement vector */
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx13             = _mm256_sub_pd(ix1,jx3);
 +            dy13             = _mm256_sub_pd(iy1,jy3);
 +            dz13             = _mm256_sub_pd(iz1,jz3);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +            dx23             = _mm256_sub_pd(ix2,jx3);
 +            dy23             = _mm256_sub_pd(iy2,jy3);
 +            dz23             = _mm256_sub_pd(iz2,jz3);
 +            dx31             = _mm256_sub_pd(ix3,jx1);
 +            dy31             = _mm256_sub_pd(iy3,jy1);
 +            dz31             = _mm256_sub_pd(iz3,jz1);
 +            dx32             = _mm256_sub_pd(ix3,jx2);
 +            dy32             = _mm256_sub_pd(iy3,jy2);
 +            dz32             = _mm256_sub_pd(iz3,jz2);
 +            dx33             = _mm256_sub_pd(ix3,jx3);
 +            dy33             = _mm256_sub_pd(iy3,jy3);
 +            dz33             = _mm256_sub_pd(iz3,jz3);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq13            = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +            rsq23            = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
 +            rsq31            = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
 +            rsq32            = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
 +            rsq33            = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);
 +
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv13           = gmx_mm256_invsqrt_pd(rsq13);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +            rinv23           = gmx_mm256_invsqrt_pd(rsq23);
 +            rinv31           = gmx_mm256_invsqrt_pd(rsq31);
 +            rinv32           = gmx_mm256_invsqrt_pd(rsq32);
 +            rinv33           = gmx_mm256_invsqrt_pd(rsq33);
 +
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq13         = _mm256_mul_pd(rinv13,rinv13);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +            rinvsq23         = _mm256_mul_pd(rinv23,rinv23);
 +            rinvsq31         = _mm256_mul_pd(rinv31,rinv31);
 +            rinvsq32         = _mm256_mul_pd(rinv32,rinv32);
 +            rinvsq33         = _mm256_mul_pd(rinv33,rinv33);
 +
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +            fjx3             = _mm256_setzero_pd();
 +            fjy3             = _mm256_setzero_pd();
 +            fjz3             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r11              = _mm256_mul_pd(rsq11,rinv11);
 +            r11              = _mm256_andnot_pd(dummy_mask,r11);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r11,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq11,_mm256_sub_pd(rinv11,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq11,rinv11),_mm256_sub_pd(rinvsq11,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r12              = _mm256_mul_pd(rsq12,rinv12);
 +            r12              = _mm256_andnot_pd(dummy_mask,r12);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r12,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq12,_mm256_sub_pd(rinv12,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq12,rinv12),_mm256_sub_pd(rinvsq12,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r13              = _mm256_mul_pd(rsq13,rinv13);
 +            r13              = _mm256_andnot_pd(dummy_mask,r13);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r13,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq13,_mm256_sub_pd(rinv13,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq13,rinv13),_mm256_sub_pd(rinvsq13,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx13);
 +            ty               = _mm256_mul_pd(fscal,dy13);
 +            tz               = _mm256_mul_pd(fscal,dz13);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r21              = _mm256_mul_pd(rsq21,rinv21);
 +            r21              = _mm256_andnot_pd(dummy_mask,r21);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r21,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq21,_mm256_sub_pd(rinv21,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq21,rinv21),_mm256_sub_pd(rinvsq21,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r22              = _mm256_mul_pd(rsq22,rinv22);
 +            r22              = _mm256_andnot_pd(dummy_mask,r22);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r22,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq22,_mm256_sub_pd(rinv22,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq22,rinv22),_mm256_sub_pd(rinvsq22,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r23              = _mm256_mul_pd(rsq23,rinv23);
 +            r23              = _mm256_andnot_pd(dummy_mask,r23);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r23,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq23,_mm256_sub_pd(rinv23,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq23,rinv23),_mm256_sub_pd(rinvsq23,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx23);
 +            ty               = _mm256_mul_pd(fscal,dy23);
 +            tz               = _mm256_mul_pd(fscal,dz23);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r31              = _mm256_mul_pd(rsq31,rinv31);
 +            r31              = _mm256_andnot_pd(dummy_mask,r31);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r31,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq31,_mm256_sub_pd(rinv31,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq31,rinv31),_mm256_sub_pd(rinvsq31,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx31);
 +            ty               = _mm256_mul_pd(fscal,dy31);
 +            tz               = _mm256_mul_pd(fscal,dz31);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r32              = _mm256_mul_pd(rsq32,rinv32);
 +            r32              = _mm256_andnot_pd(dummy_mask,r32);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r32,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq32,_mm256_sub_pd(rinv32,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq32,rinv32),_mm256_sub_pd(rinvsq32,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx32);
 +            ty               = _mm256_mul_pd(fscal,dy32);
 +            tz               = _mm256_mul_pd(fscal,dz32);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r33              = _mm256_mul_pd(rsq33,rinv33);
 +            r33              = _mm256_andnot_pd(dummy_mask,r33);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r33,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq33,_mm256_sub_pd(rinv33,velec));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq33,rinv33),_mm256_sub_pd(rinvsq33,felec));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx33);
 +            ty               = _mm256_mul_pd(fscal,dy33);
 +            tz               = _mm256_mul_pd(fscal,dz33);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA+DIM,fjptrB+DIM,fjptrC+DIM,fjptrD+DIM,
 +                                                      fjx1,fjy1,fjz1,fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
 +
 +            /* Inner loop uses 378 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
 +                                                 f+i_coord_offset+DIM,fshift+i_shift_offset);
 +
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 19 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W4W4_VF,outeriter*19 + inneriter*378);
 +}
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecEw_VdwNone_GeomW4W4_F_avx_256_double
 + * Electrostatics interaction: Ewald
 + * VdW interaction:            None
 + * Geometry:                   Water4-Water4
 + * Calculate force/pot:        Force
 + */
 +void
 +nb_kernel_ElecEw_VdwNone_GeomW4W4_F_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    real *           vdwioffsetptr3;
 +    __m256d          ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
 +    int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
 +    __m256d          jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
 +    int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
 +    __m256d          jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
 +    int              vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D;
 +    __m256d          jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
 +    __m256d          dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
 +    __m256d          dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
 +    __m256d          dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
 +    __m256d          dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
 +    __m256d          dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
 +    __m256d          dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
 +    __m256d          dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
 +    __m256d          dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
 +    __m256d          dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    __m128i          ewitab;
 +    __m256d          ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
 +    __m256d          beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
 +    real             *ewtab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +
 +    sh_ewald         = _mm256_set1_pd(fr->ic->sh_ewald);
 +    beta             = _mm256_set1_pd(fr->ic->ewaldcoeff);
 +    beta2            = _mm256_mul_pd(beta,beta);
 +    beta3            = _mm256_mul_pd(beta,beta2);
 +
 +    ewtab            = fr->ic->tabq_coul_F;
 +    ewtabscale       = _mm256_set1_pd(fr->ic->tabq_scale);
 +    ewtabhalfspace   = _mm256_set1_pd(0.5/fr->ic->tabq_scale);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    iq3              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+3]));
 +
 +    jq1              = _mm256_set1_pd(charge[inr+1]);
 +    jq2              = _mm256_set1_pd(charge[inr+2]);
 +    jq3              = _mm256_set1_pd(charge[inr+3]);
 +    qq11             = _mm256_mul_pd(iq1,jq1);
 +    qq12             = _mm256_mul_pd(iq1,jq2);
 +    qq13             = _mm256_mul_pd(iq1,jq3);
 +    qq21             = _mm256_mul_pd(iq2,jq1);
 +    qq22             = _mm256_mul_pd(iq2,jq2);
 +    qq23             = _mm256_mul_pd(iq2,jq3);
 +    qq31             = _mm256_mul_pd(iq3,jq1);
 +    qq32             = _mm256_mul_pd(iq3,jq2);
 +    qq33             = _mm256_mul_pd(iq3,jq3);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset+DIM,
 +                                                    &ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
 +
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +        fix3             = _mm256_setzero_pd();
 +        fiy3             = _mm256_setzero_pd();
 +        fiz3             = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA+DIM,x+j_coord_offsetB+DIM,
 +                                                 x+j_coord_offsetC+DIM,x+j_coord_offsetD+DIM,
 +                                                 &jx1,&jy1,&jz1,&jx2,&jy2,&jz2,&jx3,&jy3,&jz3);
 +
 +            /* Calculate displacement vector */
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx13             = _mm256_sub_pd(ix1,jx3);
 +            dy13             = _mm256_sub_pd(iy1,jy3);
 +            dz13             = _mm256_sub_pd(iz1,jz3);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +            dx23             = _mm256_sub_pd(ix2,jx3);
 +            dy23             = _mm256_sub_pd(iy2,jy3);
 +            dz23             = _mm256_sub_pd(iz2,jz3);
 +            dx31             = _mm256_sub_pd(ix3,jx1);
 +            dy31             = _mm256_sub_pd(iy3,jy1);
 +            dz31             = _mm256_sub_pd(iz3,jz1);
 +            dx32             = _mm256_sub_pd(ix3,jx2);
 +            dy32             = _mm256_sub_pd(iy3,jy2);
 +            dz32             = _mm256_sub_pd(iz3,jz2);
 +            dx33             = _mm256_sub_pd(ix3,jx3);
 +            dy33             = _mm256_sub_pd(iy3,jy3);
 +            dz33             = _mm256_sub_pd(iz3,jz3);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq13            = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +            rsq23            = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
 +            rsq31            = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
 +            rsq32            = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
 +            rsq33            = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);
 +
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv13           = gmx_mm256_invsqrt_pd(rsq13);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +            rinv23           = gmx_mm256_invsqrt_pd(rsq23);
 +            rinv31           = gmx_mm256_invsqrt_pd(rsq31);
 +            rinv32           = gmx_mm256_invsqrt_pd(rsq32);
 +            rinv33           = gmx_mm256_invsqrt_pd(rsq33);
 +
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq13         = _mm256_mul_pd(rinv13,rinv13);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +            rinvsq23         = _mm256_mul_pd(rinv23,rinv23);
 +            rinvsq31         = _mm256_mul_pd(rinv31,rinv31);
 +            rinvsq32         = _mm256_mul_pd(rinv32,rinv32);
 +            rinvsq33         = _mm256_mul_pd(rinv33,rinv33);
 +
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +            fjx3             = _mm256_setzero_pd();
 +            fjy3             = _mm256_setzero_pd();
 +            fjz3             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r11              = _mm256_mul_pd(rsq11,rinv11);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r11,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq11,rinv11),_mm256_sub_pd(rinvsq11,felec));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r12              = _mm256_mul_pd(rsq12,rinv12);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r12,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq12,rinv12),_mm256_sub_pd(rinvsq12,felec));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r13              = _mm256_mul_pd(rsq13,rinv13);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r13,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq13,rinv13),_mm256_sub_pd(rinvsq13,felec));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx13);
 +            ty               = _mm256_mul_pd(fscal,dy13);
 +            tz               = _mm256_mul_pd(fscal,dz13);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r21              = _mm256_mul_pd(rsq21,rinv21);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r21,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq21,rinv21),_mm256_sub_pd(rinvsq21,felec));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r22              = _mm256_mul_pd(rsq22,rinv22);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r22,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq22,rinv22),_mm256_sub_pd(rinvsq22,felec));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r23              = _mm256_mul_pd(rsq23,rinv23);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r23,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq23,rinv23),_mm256_sub_pd(rinvsq23,felec));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx23);
 +            ty               = _mm256_mul_pd(fscal,dy23);
 +            tz               = _mm256_mul_pd(fscal,dz23);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r31              = _mm256_mul_pd(rsq31,rinv31);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r31,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq31,rinv31),_mm256_sub_pd(rinvsq31,felec));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx31);
 +            ty               = _mm256_mul_pd(fscal,dy31);
 +            tz               = _mm256_mul_pd(fscal,dz31);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r32              = _mm256_mul_pd(rsq32,rinv32);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r32,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq32,rinv32),_mm256_sub_pd(rinvsq32,felec));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx32);
 +            ty               = _mm256_mul_pd(fscal,dy32);
 +            tz               = _mm256_mul_pd(fscal,dz32);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r33              = _mm256_mul_pd(rsq33,rinv33);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r33,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq33,rinv33),_mm256_sub_pd(rinvsq33,felec));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx33);
 +            ty               = _mm256_mul_pd(fscal,dy33);
 +            tz               = _mm256_mul_pd(fscal,dz33);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA+DIM,fjptrB+DIM,fjptrC+DIM,fjptrD+DIM,
 +                                                      fjx1,fjy1,fjz1,fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
 +
 +            /* Inner loop uses 324 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA+DIM,x+j_coord_offsetB+DIM,
 +                                                 x+j_coord_offsetC+DIM,x+j_coord_offsetD+DIM,
 +                                                 &jx1,&jy1,&jz1,&jx2,&jy2,&jz2,&jx3,&jy3,&jz3);
 +
 +            /* Calculate displacement vector */
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx13             = _mm256_sub_pd(ix1,jx3);
 +            dy13             = _mm256_sub_pd(iy1,jy3);
 +            dz13             = _mm256_sub_pd(iz1,jz3);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +            dx23             = _mm256_sub_pd(ix2,jx3);
 +            dy23             = _mm256_sub_pd(iy2,jy3);
 +            dz23             = _mm256_sub_pd(iz2,jz3);
 +            dx31             = _mm256_sub_pd(ix3,jx1);
 +            dy31             = _mm256_sub_pd(iy3,jy1);
 +            dz31             = _mm256_sub_pd(iz3,jz1);
 +            dx32             = _mm256_sub_pd(ix3,jx2);
 +            dy32             = _mm256_sub_pd(iy3,jy2);
 +            dz32             = _mm256_sub_pd(iz3,jz2);
 +            dx33             = _mm256_sub_pd(ix3,jx3);
 +            dy33             = _mm256_sub_pd(iy3,jy3);
 +            dz33             = _mm256_sub_pd(iz3,jz3);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq13            = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +            rsq23            = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
 +            rsq31            = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
 +            rsq32            = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
 +            rsq33            = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);
 +
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv13           = gmx_mm256_invsqrt_pd(rsq13);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +            rinv23           = gmx_mm256_invsqrt_pd(rsq23);
 +            rinv31           = gmx_mm256_invsqrt_pd(rsq31);
 +            rinv32           = gmx_mm256_invsqrt_pd(rsq32);
 +            rinv33           = gmx_mm256_invsqrt_pd(rsq33);
 +
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq13         = _mm256_mul_pd(rinv13,rinv13);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +            rinvsq23         = _mm256_mul_pd(rinv23,rinv23);
 +            rinvsq31         = _mm256_mul_pd(rinv31,rinv31);
 +            rinvsq32         = _mm256_mul_pd(rinv32,rinv32);
 +            rinvsq33         = _mm256_mul_pd(rinv33,rinv33);
 +
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +            fjx3             = _mm256_setzero_pd();
 +            fjy3             = _mm256_setzero_pd();
 +            fjz3             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r11              = _mm256_mul_pd(rsq11,rinv11);
 +            r11              = _mm256_andnot_pd(dummy_mask,r11);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r11,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq11,rinv11),_mm256_sub_pd(rinvsq11,felec));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r12              = _mm256_mul_pd(rsq12,rinv12);
 +            r12              = _mm256_andnot_pd(dummy_mask,r12);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r12,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq12,rinv12),_mm256_sub_pd(rinvsq12,felec));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r13              = _mm256_mul_pd(rsq13,rinv13);
 +            r13              = _mm256_andnot_pd(dummy_mask,r13);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r13,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq13,rinv13),_mm256_sub_pd(rinvsq13,felec));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx13);
 +            ty               = _mm256_mul_pd(fscal,dy13);
 +            tz               = _mm256_mul_pd(fscal,dz13);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r21              = _mm256_mul_pd(rsq21,rinv21);
 +            r21              = _mm256_andnot_pd(dummy_mask,r21);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r21,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq21,rinv21),_mm256_sub_pd(rinvsq21,felec));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r22              = _mm256_mul_pd(rsq22,rinv22);
 +            r22              = _mm256_andnot_pd(dummy_mask,r22);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r22,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq22,rinv22),_mm256_sub_pd(rinvsq22,felec));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r23              = _mm256_mul_pd(rsq23,rinv23);
 +            r23              = _mm256_andnot_pd(dummy_mask,r23);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r23,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq23,rinv23),_mm256_sub_pd(rinvsq23,felec));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx23);
 +            ty               = _mm256_mul_pd(fscal,dy23);
 +            tz               = _mm256_mul_pd(fscal,dz23);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r31              = _mm256_mul_pd(rsq31,rinv31);
 +            r31              = _mm256_andnot_pd(dummy_mask,r31);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r31,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq31,rinv31),_mm256_sub_pd(rinvsq31,felec));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx31);
 +            ty               = _mm256_mul_pd(fscal,dy31);
 +            tz               = _mm256_mul_pd(fscal,dz31);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r32              = _mm256_mul_pd(rsq32,rinv32);
 +            r32              = _mm256_andnot_pd(dummy_mask,r32);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r32,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq32,rinv32),_mm256_sub_pd(rinvsq32,felec));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx32);
 +            ty               = _mm256_mul_pd(fscal,dy32);
 +            tz               = _mm256_mul_pd(fscal,dz32);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r33              = _mm256_mul_pd(rsq33,rinv33);
 +            r33              = _mm256_andnot_pd(dummy_mask,r33);
 +
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r33,ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq33,rinv33),_mm256_sub_pd(rinvsq33,felec));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx33);
 +            ty               = _mm256_mul_pd(fscal,dy33);
 +            tz               = _mm256_mul_pd(fscal,dz33);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA+DIM,fjptrB+DIM,fjptrC+DIM,fjptrD+DIM,
 +                                                      fjx1,fjy1,fjz1,fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
 +
 +            /* Inner loop uses 333 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
 +                                                 f+i_coord_offset+DIM,fshift+i_shift_offset);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 18 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W4W4_F,outeriter*18 + inneriter*333);
 +}
index b3f1d800172fd3590571ca132337d144cf3a9ce9,0000000000000000000000000000000000000000..3ab68aaedb747e5e5ea77473180ea329aa93a606
mode 100644,000000..100644
--- /dev/null
@@@ -1,957 -1,0 +1,957 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/*
 + * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2001-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the Free
 + * Software Foundation; either version 2 of the License, or (at your option) any
 + * later version.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website.
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecGB_VdwCSTab_GeomP1P1_VF_avx_256_double
 + * Electrostatics interaction: GeneralizedBorn
 + * VdW interaction:            CubicSplineTable
 + * Geometry:                   Particle-Particle
 + * Calculate force/pot:        PotentialAndForce
 + */
 +void
 +nb_kernel_ElecGB_VdwCSTab_GeomP1P1_VF_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    __m128i          gbitab;
 +    __m256d          vgb,fgb,vgbsum,dvdasum,gbscale,gbtabscale,isaprod,gbqqfactor,gbinvepsdiff,gbeps,dvdatmp;
 +    __m256d          minushalf = _mm256_set1_pd(-0.5);
 +    real             *invsqrta,*dvda,*gbtab;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m128i          vfitab;
 +    __m128i          ifour       = _mm_set1_epi32(4);
 +    __m256d          rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
 +    real             *vftab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    vftab            = kernel_data->table_vdw->data;
 +    vftabscale       = _mm256_set1_pd(kernel_data->table_vdw->scale);
 +
 +    invsqrta         = fr->invsqrta;
 +    dvda             = fr->dvda;
 +    gbtabscale       = _mm256_set1_pd(fr->gbtab.scale);
 +    gbtab            = fr->gbtab.data;
 +    gbinvepsdiff     = _mm256_set1_pd((1.0/fr->epsilon_r) - (1.0/fr->gb_epsilon_solvent));
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_1rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +
 +        /* Load parameters for i particles */
 +        iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +        isai0            = _mm256_set1_pd(invsqrta[inr+0]);
 +        vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +        /* Reset potential sums */
 +        velecsum         = _mm256_setzero_pd();
 +        vgbsum           = _mm256_setzero_pd();
 +        vvdwsum          = _mm256_setzero_pd();
 +        dvdasum          = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            isaj0            = gmx_mm256_load_4real_swizzle_pd(invsqrta+jnrA+0,invsqrta+jnrB+0,
 +                                                                 invsqrta+jnrC+0,invsqrta+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,3);
 +
 +            /* GENERALIZED BORN AND COULOMB ELECTROSTATICS */
 +            isaprod          = _mm256_mul_pd(isai0,isaj0);
 +            gbqqfactor       = _mm256_xor_pd(signbit,_mm256_mul_pd(qq00,_mm256_mul_pd(isaprod,gbinvepsdiff)));
 +            gbscale          = _mm256_mul_pd(isaprod,gbtabscale);
 +
 +            /* Calculate generalized born table index - this is a separate table from the normal one,
 +             * but we use the same procedure by multiplying r with scale and truncating to integer.
 +             */
 +            rt               = _mm256_mul_pd(r00,gbscale);
 +            gbitab           = _mm256_cvttpd_epi32(rt);
 +            gbeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            gbitab           = _mm_slli_epi32(gbitab,2);
 +            Y                = _mm256_load_pd( gbtab + _mm_extract_epi32(gbitab,0) );
 +            F                = _mm256_load_pd( gbtab + _mm_extract_epi32(gbitab,1) );
 +            G                = _mm256_load_pd( gbtab + _mm_extract_epi32(gbitab,2) );
 +            H                = _mm256_load_pd( gbtab + _mm_extract_epi32(gbitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(gbeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(gbeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(gbeps,Fp));
 +            vgb              = _mm256_mul_pd(gbqqfactor,VV);
 +
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(gbeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fgb              = _mm256_mul_pd(gbqqfactor,_mm256_mul_pd(FF,gbscale));
 +            dvdatmp          = _mm256_mul_pd(minushalf,_mm256_add_pd(vgb,_mm256_mul_pd(fgb,r00)));
 +            dvdasum          = _mm256_add_pd(dvdasum,dvdatmp);
 +            fjptrA           = dvda+jnrA;
 +            fjptrB           = dvda+jnrB;
 +            fjptrC           = dvda+jnrC;
 +            fjptrD           = dvda+jnrD;
 +            gmx_mm256_increment_4real_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                 _mm256_mul_pd(dvdatmp,_mm256_mul_pd(isaj0,isaj0)));
 +            velec            = _mm256_mul_pd(qq00,rinv00);
 +            felec            = _mm256_mul_pd(_mm256_sub_pd(_mm256_mul_pd(velec,rinv00),fgb),rinv00);
 +
 +            /* CUBIC SPLINE TABLE DISPERSION */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw6            = _mm256_mul_pd(c6_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw6            = _mm256_mul_pd(c6_00,FF);
 +
 +            /* CUBIC SPLINE TABLE REPULSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw12           = _mm256_mul_pd(c12_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw12           = _mm256_mul_pd(c12_00,FF);
 +            vvdw             = _mm256_add_pd(vvdw12,vvdw6);
 +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +            vgbsum           = _mm256_add_pd(vgbsum,vgb);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
 +
 +            /* Inner loop uses 91 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            isaj0            = gmx_mm256_load_4real_swizzle_pd(invsqrta+jnrA+0,invsqrta+jnrB+0,
 +                                                                 invsqrta+jnrC+0,invsqrta+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,3);
 +
 +            /* GENERALIZED BORN AND COULOMB ELECTROSTATICS */
 +            isaprod          = _mm256_mul_pd(isai0,isaj0);
 +            gbqqfactor       = _mm256_xor_pd(signbit,_mm256_mul_pd(qq00,_mm256_mul_pd(isaprod,gbinvepsdiff)));
 +            gbscale          = _mm256_mul_pd(isaprod,gbtabscale);
 +
 +            /* Calculate generalized born table index - this is a separate table from the normal one,
 +             * but we use the same procedure by multiplying r with scale and truncating to integer.
 +             */
 +            rt               = _mm256_mul_pd(r00,gbscale);
 +            gbitab           = _mm256_cvttpd_epi32(rt);
 +            gbeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            gbitab           = _mm_slli_epi32(gbitab,2);
 +            Y                = _mm256_load_pd( gbtab + _mm_extract_epi32(gbitab,0) );
 +            F                = _mm256_load_pd( gbtab + _mm_extract_epi32(gbitab,1) );
 +            G                = _mm256_load_pd( gbtab + _mm_extract_epi32(gbitab,2) );
 +            H                = _mm256_load_pd( gbtab + _mm_extract_epi32(gbitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(gbeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(gbeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(gbeps,Fp));
 +            vgb              = _mm256_mul_pd(gbqqfactor,VV);
 +
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(gbeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fgb              = _mm256_mul_pd(gbqqfactor,_mm256_mul_pd(FF,gbscale));
 +            dvdatmp          = _mm256_mul_pd(minushalf,_mm256_add_pd(vgb,_mm256_mul_pd(fgb,r00)));
 +            dvdasum          = _mm256_add_pd(dvdasum,dvdatmp);
 +            /* The pointers to scratch make sure that this code with compilers that take gmx_restrict seriously (e.g. icc 13) really can't screw things up. */
 +            fjptrA             = (jnrlistA>=0) ? dvda+jnrA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? dvda+jnrB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? dvda+jnrC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? dvda+jnrD : scratch;
 +            gmx_mm256_increment_4real_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                 _mm256_mul_pd(dvdatmp,_mm256_mul_pd(isaj0,isaj0)));
 +            velec            = _mm256_mul_pd(qq00,rinv00);
 +            felec            = _mm256_mul_pd(_mm256_sub_pd(_mm256_mul_pd(velec,rinv00),fgb),rinv00);
 +
 +            /* CUBIC SPLINE TABLE DISPERSION */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw6            = _mm256_mul_pd(c6_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw6            = _mm256_mul_pd(c6_00,FF);
 +
 +            /* CUBIC SPLINE TABLE REPULSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw12           = _mm256_mul_pd(c12_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw12           = _mm256_mul_pd(c12_00,FF);
 +            vvdw             = _mm256_add_pd(vvdw12,vvdw6);
 +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +            vgb              = _mm256_andnot_pd(dummy_mask,vgb);
 +            vgbsum           = _mm256_add_pd(vgbsum,vgb);
 +            vvdw             = _mm256_andnot_pd(dummy_mask,vvdw);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
 +
 +            /* Inner loop uses 92 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_1atom_swizzle_pd(fix0,fiy0,fiz0,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
 +        gmx_mm256_update_1pot_pd(vgbsum,kernel_data->energygrp_polarization+ggid);
 +        gmx_mm256_update_1pot_pd(vvdwsum,kernel_data->energygrp_vdw+ggid);
 +        dvdasum = _mm256_mul_pd(dvdasum, _mm256_mul_pd(isai0,isai0));
 +        gmx_mm256_update_1pot_pd(dvdasum,dvda+inr);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 10 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_VF,outeriter*10 + inneriter*92);
 +}
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecGB_VdwCSTab_GeomP1P1_F_avx_256_double
 + * Electrostatics interaction: GeneralizedBorn
 + * VdW interaction:            CubicSplineTable
 + * Geometry:                   Particle-Particle
 + * Calculate force/pot:        Force
 + */
 +void
 +nb_kernel_ElecGB_VdwCSTab_GeomP1P1_F_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    __m128i          gbitab;
 +    __m256d          vgb,fgb,vgbsum,dvdasum,gbscale,gbtabscale,isaprod,gbqqfactor,gbinvepsdiff,gbeps,dvdatmp;
 +    __m256d          minushalf = _mm256_set1_pd(-0.5);
 +    real             *invsqrta,*dvda,*gbtab;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m128i          vfitab;
 +    __m128i          ifour       = _mm_set1_epi32(4);
 +    __m256d          rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
 +    real             *vftab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    vftab            = kernel_data->table_vdw->data;
 +    vftabscale       = _mm256_set1_pd(kernel_data->table_vdw->scale);
 +
 +    invsqrta         = fr->invsqrta;
 +    dvda             = fr->dvda;
 +    gbtabscale       = _mm256_set1_pd(fr->gbtab.scale);
 +    gbtab            = fr->gbtab.data;
 +    gbinvepsdiff     = _mm256_set1_pd((1.0/fr->epsilon_r) - (1.0/fr->gb_epsilon_solvent));
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_1rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +
 +        /* Load parameters for i particles */
 +        iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +        isai0            = _mm256_set1_pd(invsqrta[inr+0]);
 +        vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +        dvdasum          = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            isaj0            = gmx_mm256_load_4real_swizzle_pd(invsqrta+jnrA+0,invsqrta+jnrB+0,
 +                                                                 invsqrta+jnrC+0,invsqrta+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,3);
 +
 +            /* GENERALIZED BORN AND COULOMB ELECTROSTATICS */
 +            isaprod          = _mm256_mul_pd(isai0,isaj0);
 +            gbqqfactor       = _mm256_xor_pd(signbit,_mm256_mul_pd(qq00,_mm256_mul_pd(isaprod,gbinvepsdiff)));
 +            gbscale          = _mm256_mul_pd(isaprod,gbtabscale);
 +
 +            /* Calculate generalized born table index - this is a separate table from the normal one,
 +             * but we use the same procedure by multiplying r with scale and truncating to integer.
 +             */
 +            rt               = _mm256_mul_pd(r00,gbscale);
 +            gbitab           = _mm256_cvttpd_epi32(rt);
 +            gbeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            gbitab           = _mm_slli_epi32(gbitab,2);
 +            Y                = _mm256_load_pd( gbtab + _mm_extract_epi32(gbitab,0) );
 +            F                = _mm256_load_pd( gbtab + _mm_extract_epi32(gbitab,1) );
 +            G                = _mm256_load_pd( gbtab + _mm_extract_epi32(gbitab,2) );
 +            H                = _mm256_load_pd( gbtab + _mm_extract_epi32(gbitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(gbeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(gbeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(gbeps,Fp));
 +            vgb              = _mm256_mul_pd(gbqqfactor,VV);
 +
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(gbeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fgb              = _mm256_mul_pd(gbqqfactor,_mm256_mul_pd(FF,gbscale));
 +            dvdatmp          = _mm256_mul_pd(minushalf,_mm256_add_pd(vgb,_mm256_mul_pd(fgb,r00)));
 +            dvdasum          = _mm256_add_pd(dvdasum,dvdatmp);
 +            fjptrA           = dvda+jnrA;
 +            fjptrB           = dvda+jnrB;
 +            fjptrC           = dvda+jnrC;
 +            fjptrD           = dvda+jnrD;
 +            gmx_mm256_increment_4real_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                 _mm256_mul_pd(dvdatmp,_mm256_mul_pd(isaj0,isaj0)));
 +            velec            = _mm256_mul_pd(qq00,rinv00);
 +            felec            = _mm256_mul_pd(_mm256_sub_pd(_mm256_mul_pd(velec,rinv00),fgb),rinv00);
 +
 +            /* CUBIC SPLINE TABLE DISPERSION */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw6            = _mm256_mul_pd(c6_00,FF);
 +
 +            /* CUBIC SPLINE TABLE REPULSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw12           = _mm256_mul_pd(c12_00,FF);
 +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
 +
 +            /* Inner loop uses 81 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            isaj0            = gmx_mm256_load_4real_swizzle_pd(invsqrta+jnrA+0,invsqrta+jnrB+0,
 +                                                                 invsqrta+jnrC+0,invsqrta+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,3);
 +
 +            /* GENERALIZED BORN AND COULOMB ELECTROSTATICS */
 +            isaprod          = _mm256_mul_pd(isai0,isaj0);
 +            gbqqfactor       = _mm256_xor_pd(signbit,_mm256_mul_pd(qq00,_mm256_mul_pd(isaprod,gbinvepsdiff)));
 +            gbscale          = _mm256_mul_pd(isaprod,gbtabscale);
 +
 +            /* Calculate generalized born table index - this is a separate table from the normal one,
 +             * but we use the same procedure by multiplying r with scale and truncating to integer.
 +             */
 +            rt               = _mm256_mul_pd(r00,gbscale);
 +            gbitab           = _mm256_cvttpd_epi32(rt);
 +            gbeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            gbitab           = _mm_slli_epi32(gbitab,2);
 +            Y                = _mm256_load_pd( gbtab + _mm_extract_epi32(gbitab,0) );
 +            F                = _mm256_load_pd( gbtab + _mm_extract_epi32(gbitab,1) );
 +            G                = _mm256_load_pd( gbtab + _mm_extract_epi32(gbitab,2) );
 +            H                = _mm256_load_pd( gbtab + _mm_extract_epi32(gbitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(gbeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(gbeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(gbeps,Fp));
 +            vgb              = _mm256_mul_pd(gbqqfactor,VV);
 +
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(gbeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fgb              = _mm256_mul_pd(gbqqfactor,_mm256_mul_pd(FF,gbscale));
 +            dvdatmp          = _mm256_mul_pd(minushalf,_mm256_add_pd(vgb,_mm256_mul_pd(fgb,r00)));
 +            dvdasum          = _mm256_add_pd(dvdasum,dvdatmp);
 +            /* The pointers to scratch make sure that this code with compilers that take gmx_restrict seriously (e.g. icc 13) really can't screw things up. */
 +            fjptrA             = (jnrlistA>=0) ? dvda+jnrA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? dvda+jnrB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? dvda+jnrC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? dvda+jnrD : scratch;
 +            gmx_mm256_increment_4real_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                 _mm256_mul_pd(dvdatmp,_mm256_mul_pd(isaj0,isaj0)));
 +            velec            = _mm256_mul_pd(qq00,rinv00);
 +            felec            = _mm256_mul_pd(_mm256_sub_pd(_mm256_mul_pd(velec,rinv00),fgb),rinv00);
 +
 +            /* CUBIC SPLINE TABLE DISPERSION */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw6            = _mm256_mul_pd(c6_00,FF);
 +
 +            /* CUBIC SPLINE TABLE REPULSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw12           = _mm256_mul_pd(c12_00,FF);
 +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
 +
 +            /* Inner loop uses 82 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_1atom_swizzle_pd(fix0,fiy0,fiz0,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        dvdasum = _mm256_mul_pd(dvdasum, _mm256_mul_pd(isai0,isai0));
 +        gmx_mm256_update_1pot_pd(dvdasum,dvda+inr);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 7 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_F,outeriter*7 + inneriter*82);
 +}
index e1ca1e33786677ca487746b80810516a6d5a3a99,0000000000000000000000000000000000000000..abb078218264047f347d7ae3230a47d36d410667
mode 100644,000000..100644
--- /dev/null
@@@ -1,855 -1,0 +1,855 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/*
 + * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2001-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the Free
 + * Software Foundation; either version 2 of the License, or (at your option) any
 + * later version.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website.
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecGB_VdwLJ_GeomP1P1_VF_avx_256_double
 + * Electrostatics interaction: GeneralizedBorn
 + * VdW interaction:            LennardJones
 + * Geometry:                   Particle-Particle
 + * Calculate force/pot:        PotentialAndForce
 + */
 +void
 +nb_kernel_ElecGB_VdwLJ_GeomP1P1_VF_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    __m128i          gbitab;
 +    __m256d          vgb,fgb,vgbsum,dvdasum,gbscale,gbtabscale,isaprod,gbqqfactor,gbinvepsdiff,gbeps,dvdatmp;
 +    __m256d          minushalf = _mm256_set1_pd(-0.5);
 +    real             *invsqrta,*dvda,*gbtab;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m128i          vfitab;
 +    __m128i          ifour       = _mm_set1_epi32(4);
 +    __m256d          rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
 +    real             *vftab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    invsqrta         = fr->invsqrta;
 +    dvda             = fr->dvda;
 +    gbtabscale       = _mm256_set1_pd(fr->gbtab.scale);
 +    gbtab            = fr->gbtab.data;
 +    gbinvepsdiff     = _mm256_set1_pd((1.0/fr->epsilon_r) - (1.0/fr->gb_epsilon_solvent));
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_1rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +
 +        /* Load parameters for i particles */
 +        iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +        isai0            = _mm256_set1_pd(invsqrta[inr+0]);
 +        vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +        /* Reset potential sums */
 +        velecsum         = _mm256_setzero_pd();
 +        vgbsum           = _mm256_setzero_pd();
 +        vvdwsum          = _mm256_setzero_pd();
 +        dvdasum          = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            isaj0            = gmx_mm256_load_4real_swizzle_pd(invsqrta+jnrA+0,invsqrta+jnrB+0,
 +                                                                 invsqrta+jnrC+0,invsqrta+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* GENERALIZED BORN AND COULOMB ELECTROSTATICS */
 +            isaprod          = _mm256_mul_pd(isai0,isaj0);
 +            gbqqfactor       = _mm256_xor_pd(signbit,_mm256_mul_pd(qq00,_mm256_mul_pd(isaprod,gbinvepsdiff)));
 +            gbscale          = _mm256_mul_pd(isaprod,gbtabscale);
 +
 +            /* Calculate generalized born table index - this is a separate table from the normal one,
 +             * but we use the same procedure by multiplying r with scale and truncating to integer.
 +             */
 +            rt               = _mm256_mul_pd(r00,gbscale);
 +            gbitab           = _mm256_cvttpd_epi32(rt);
 +            gbeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            gbitab           = _mm_slli_epi32(gbitab,2);
 +            Y                = _mm256_load_pd( gbtab + _mm_extract_epi32(gbitab,0) );
 +            F                = _mm256_load_pd( gbtab + _mm_extract_epi32(gbitab,1) );
 +            G                = _mm256_load_pd( gbtab + _mm_extract_epi32(gbitab,2) );
 +            H                = _mm256_load_pd( gbtab + _mm_extract_epi32(gbitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(gbeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(gbeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(gbeps,Fp));
 +            vgb              = _mm256_mul_pd(gbqqfactor,VV);
 +
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(gbeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fgb              = _mm256_mul_pd(gbqqfactor,_mm256_mul_pd(FF,gbscale));
 +            dvdatmp          = _mm256_mul_pd(minushalf,_mm256_add_pd(vgb,_mm256_mul_pd(fgb,r00)));
 +            dvdasum          = _mm256_add_pd(dvdasum,dvdatmp);
 +            fjptrA           = dvda+jnrA;
 +            fjptrB           = dvda+jnrB;
 +            fjptrC           = dvda+jnrC;
 +            fjptrD           = dvda+jnrD;
 +            gmx_mm256_increment_4real_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                 _mm256_mul_pd(dvdatmp,_mm256_mul_pd(isaj0,isaj0)));
 +            velec            = _mm256_mul_pd(qq00,rinv00);
 +            felec            = _mm256_mul_pd(_mm256_sub_pd(_mm256_mul_pd(velec,rinv00),fgb),rinv00);
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +            vgbsum           = _mm256_add_pd(vgbsum,vgb);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
 +
 +            /* Inner loop uses 70 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            isaj0            = gmx_mm256_load_4real_swizzle_pd(invsqrta+jnrA+0,invsqrta+jnrB+0,
 +                                                                 invsqrta+jnrC+0,invsqrta+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* GENERALIZED BORN AND COULOMB ELECTROSTATICS */
 +            isaprod          = _mm256_mul_pd(isai0,isaj0);
 +            gbqqfactor       = _mm256_xor_pd(signbit,_mm256_mul_pd(qq00,_mm256_mul_pd(isaprod,gbinvepsdiff)));
 +            gbscale          = _mm256_mul_pd(isaprod,gbtabscale);
 +
 +            /* Calculate generalized born table index - this is a separate table from the normal one,
 +             * but we use the same procedure by multiplying r with scale and truncating to integer.
 +             */
 +            rt               = _mm256_mul_pd(r00,gbscale);
 +            gbitab           = _mm256_cvttpd_epi32(rt);
 +            gbeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            gbitab           = _mm_slli_epi32(gbitab,2);
 +            Y                = _mm256_load_pd( gbtab + _mm_extract_epi32(gbitab,0) );
 +            F                = _mm256_load_pd( gbtab + _mm_extract_epi32(gbitab,1) );
 +            G                = _mm256_load_pd( gbtab + _mm_extract_epi32(gbitab,2) );
 +            H                = _mm256_load_pd( gbtab + _mm_extract_epi32(gbitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(gbeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(gbeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(gbeps,Fp));
 +            vgb              = _mm256_mul_pd(gbqqfactor,VV);
 +
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(gbeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fgb              = _mm256_mul_pd(gbqqfactor,_mm256_mul_pd(FF,gbscale));
 +            dvdatmp          = _mm256_mul_pd(minushalf,_mm256_add_pd(vgb,_mm256_mul_pd(fgb,r00)));
 +            dvdasum          = _mm256_add_pd(dvdasum,dvdatmp);
 +            /* The pointers to scratch make sure that this code with compilers that take gmx_restrict seriously (e.g. icc 13) really can't screw things up. */
 +            fjptrA             = (jnrlistA>=0) ? dvda+jnrA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? dvda+jnrB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? dvda+jnrC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? dvda+jnrD : scratch;
 +            gmx_mm256_increment_4real_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                 _mm256_mul_pd(dvdatmp,_mm256_mul_pd(isaj0,isaj0)));
 +            velec            = _mm256_mul_pd(qq00,rinv00);
 +            felec            = _mm256_mul_pd(_mm256_sub_pd(_mm256_mul_pd(velec,rinv00),fgb),rinv00);
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +            vgb              = _mm256_andnot_pd(dummy_mask,vgb);
 +            vgbsum           = _mm256_add_pd(vgbsum,vgb);
 +            vvdw             = _mm256_andnot_pd(dummy_mask,vvdw);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
 +
 +            /* Inner loop uses 71 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_1atom_swizzle_pd(fix0,fiy0,fiz0,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
 +        gmx_mm256_update_1pot_pd(vgbsum,kernel_data->energygrp_polarization+ggid);
 +        gmx_mm256_update_1pot_pd(vvdwsum,kernel_data->energygrp_vdw+ggid);
 +        dvdasum = _mm256_mul_pd(dvdasum, _mm256_mul_pd(isai0,isai0));
 +        gmx_mm256_update_1pot_pd(dvdasum,dvda+inr);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 10 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_VF,outeriter*10 + inneriter*71);
 +}
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecGB_VdwLJ_GeomP1P1_F_avx_256_double
 + * Electrostatics interaction: GeneralizedBorn
 + * VdW interaction:            LennardJones
 + * Geometry:                   Particle-Particle
 + * Calculate force/pot:        Force
 + */
 +void
 +nb_kernel_ElecGB_VdwLJ_GeomP1P1_F_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    __m128i          gbitab;
 +    __m256d          vgb,fgb,vgbsum,dvdasum,gbscale,gbtabscale,isaprod,gbqqfactor,gbinvepsdiff,gbeps,dvdatmp;
 +    __m256d          minushalf = _mm256_set1_pd(-0.5);
 +    real             *invsqrta,*dvda,*gbtab;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m128i          vfitab;
 +    __m128i          ifour       = _mm_set1_epi32(4);
 +    __m256d          rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
 +    real             *vftab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    invsqrta         = fr->invsqrta;
 +    dvda             = fr->dvda;
 +    gbtabscale       = _mm256_set1_pd(fr->gbtab.scale);
 +    gbtab            = fr->gbtab.data;
 +    gbinvepsdiff     = _mm256_set1_pd((1.0/fr->epsilon_r) - (1.0/fr->gb_epsilon_solvent));
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_1rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +
 +        /* Load parameters for i particles */
 +        iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +        isai0            = _mm256_set1_pd(invsqrta[inr+0]);
 +        vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +        dvdasum          = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            isaj0            = gmx_mm256_load_4real_swizzle_pd(invsqrta+jnrA+0,invsqrta+jnrB+0,
 +                                                                 invsqrta+jnrC+0,invsqrta+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* GENERALIZED BORN AND COULOMB ELECTROSTATICS */
 +            isaprod          = _mm256_mul_pd(isai0,isaj0);
 +            gbqqfactor       = _mm256_xor_pd(signbit,_mm256_mul_pd(qq00,_mm256_mul_pd(isaprod,gbinvepsdiff)));
 +            gbscale          = _mm256_mul_pd(isaprod,gbtabscale);
 +
 +            /* Calculate generalized born table index - this is a separate table from the normal one,
 +             * but we use the same procedure by multiplying r with scale and truncating to integer.
 +             */
 +            rt               = _mm256_mul_pd(r00,gbscale);
 +            gbitab           = _mm256_cvttpd_epi32(rt);
 +            gbeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            gbitab           = _mm_slli_epi32(gbitab,2);
 +            Y                = _mm256_load_pd( gbtab + _mm_extract_epi32(gbitab,0) );
 +            F                = _mm256_load_pd( gbtab + _mm_extract_epi32(gbitab,1) );
 +            G                = _mm256_load_pd( gbtab + _mm_extract_epi32(gbitab,2) );
 +            H                = _mm256_load_pd( gbtab + _mm_extract_epi32(gbitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(gbeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(gbeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(gbeps,Fp));
 +            vgb              = _mm256_mul_pd(gbqqfactor,VV);
 +
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(gbeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fgb              = _mm256_mul_pd(gbqqfactor,_mm256_mul_pd(FF,gbscale));
 +            dvdatmp          = _mm256_mul_pd(minushalf,_mm256_add_pd(vgb,_mm256_mul_pd(fgb,r00)));
 +            dvdasum          = _mm256_add_pd(dvdasum,dvdatmp);
 +            fjptrA           = dvda+jnrA;
 +            fjptrB           = dvda+jnrB;
 +            fjptrC           = dvda+jnrC;
 +            fjptrD           = dvda+jnrD;
 +            gmx_mm256_increment_4real_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                 _mm256_mul_pd(dvdatmp,_mm256_mul_pd(isaj0,isaj0)));
 +            velec            = _mm256_mul_pd(qq00,rinv00);
 +            felec            = _mm256_mul_pd(_mm256_sub_pd(_mm256_mul_pd(velec,rinv00),fgb),rinv00);
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(_mm256_mul_pd(c12_00,rinvsix),c6_00),_mm256_mul_pd(rinvsix,rinvsq00));
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
 +
 +            /* Inner loop uses 63 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            isaj0            = gmx_mm256_load_4real_swizzle_pd(invsqrta+jnrA+0,invsqrta+jnrB+0,
 +                                                                 invsqrta+jnrC+0,invsqrta+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* GENERALIZED BORN AND COULOMB ELECTROSTATICS */
 +            isaprod          = _mm256_mul_pd(isai0,isaj0);
 +            gbqqfactor       = _mm256_xor_pd(signbit,_mm256_mul_pd(qq00,_mm256_mul_pd(isaprod,gbinvepsdiff)));
 +            gbscale          = _mm256_mul_pd(isaprod,gbtabscale);
 +
 +            /* Calculate generalized born table index - this is a separate table from the normal one,
 +             * but we use the same procedure by multiplying r with scale and truncating to integer.
 +             */
 +            rt               = _mm256_mul_pd(r00,gbscale);
 +            gbitab           = _mm256_cvttpd_epi32(rt);
 +            gbeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            gbitab           = _mm_slli_epi32(gbitab,2);
 +            Y                = _mm256_load_pd( gbtab + _mm_extract_epi32(gbitab,0) );
 +            F                = _mm256_load_pd( gbtab + _mm_extract_epi32(gbitab,1) );
 +            G                = _mm256_load_pd( gbtab + _mm_extract_epi32(gbitab,2) );
 +            H                = _mm256_load_pd( gbtab + _mm_extract_epi32(gbitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(gbeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(gbeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(gbeps,Fp));
 +            vgb              = _mm256_mul_pd(gbqqfactor,VV);
 +
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(gbeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fgb              = _mm256_mul_pd(gbqqfactor,_mm256_mul_pd(FF,gbscale));
 +            dvdatmp          = _mm256_mul_pd(minushalf,_mm256_add_pd(vgb,_mm256_mul_pd(fgb,r00)));
 +            dvdasum          = _mm256_add_pd(dvdasum,dvdatmp);
 +            /* The pointers to scratch make sure that this code with compilers that take gmx_restrict seriously (e.g. icc 13) really can't screw things up. */
 +            fjptrA             = (jnrlistA>=0) ? dvda+jnrA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? dvda+jnrB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? dvda+jnrC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? dvda+jnrD : scratch;
 +            gmx_mm256_increment_4real_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                 _mm256_mul_pd(dvdatmp,_mm256_mul_pd(isaj0,isaj0)));
 +            velec            = _mm256_mul_pd(qq00,rinv00);
 +            felec            = _mm256_mul_pd(_mm256_sub_pd(_mm256_mul_pd(velec,rinv00),fgb),rinv00);
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(_mm256_mul_pd(c12_00,rinvsix),c6_00),_mm256_mul_pd(rinvsix,rinvsq00));
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
 +
 +            /* Inner loop uses 64 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_1atom_swizzle_pd(fix0,fiy0,fiz0,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        dvdasum = _mm256_mul_pd(dvdasum, _mm256_mul_pd(isai0,isai0));
 +        gmx_mm256_update_1pot_pd(dvdasum,dvda+inr);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 7 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_F,outeriter*7 + inneriter*64);
 +}
index 4976a5da811df01ddc5927f14d1d68b8c1db6382,0000000000000000000000000000000000000000..27c0bb96b2ece012f609ed02305ac24fcadfe699
mode 100644,000000..100644
--- /dev/null
@@@ -1,760 -1,0 +1,760 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/*
 + * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2001-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the Free
 + * Software Foundation; either version 2 of the License, or (at your option) any
 + * later version.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website.
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecGB_VdwNone_GeomP1P1_VF_avx_256_double
 + * Electrostatics interaction: GeneralizedBorn
 + * VdW interaction:            None
 + * Geometry:                   Particle-Particle
 + * Calculate force/pot:        PotentialAndForce
 + */
 +void
 +nb_kernel_ElecGB_VdwNone_GeomP1P1_VF_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    __m128i          gbitab;
 +    __m256d          vgb,fgb,vgbsum,dvdasum,gbscale,gbtabscale,isaprod,gbqqfactor,gbinvepsdiff,gbeps,dvdatmp;
 +    __m256d          minushalf = _mm256_set1_pd(-0.5);
 +    real             *invsqrta,*dvda,*gbtab;
 +    __m128i          vfitab;
 +    __m128i          ifour       = _mm_set1_epi32(4);
 +    __m256d          rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
 +    real             *vftab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +
 +    invsqrta         = fr->invsqrta;
 +    dvda             = fr->dvda;
 +    gbtabscale       = _mm256_set1_pd(fr->gbtab.scale);
 +    gbtab            = fr->gbtab.data;
 +    gbinvepsdiff     = _mm256_set1_pd((1.0/fr->epsilon_r) - (1.0/fr->gb_epsilon_solvent));
 +
 +    /* Initialize j indices/offsets up front so compilers cannot warn about possibly uninitialized use */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_1rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +
 +        /* Load parameters for i particles */
 +        iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +        isai0            = _mm256_set1_pd(invsqrta[inr+0]);
 +
 +        /* Reset potential sums */
 +        velecsum         = _mm256_setzero_pd();
 +        vgbsum           = _mm256_setzero_pd();
 +        dvdasum          = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop: full chunks of 4 real j atoms; stop when jjnr[jidx+3]<0, i.e. padding */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            isaj0            = gmx_mm256_load_4real_swizzle_pd(invsqrta+jnrA+0,invsqrta+jnrB+0,
 +                                                                 invsqrta+jnrC+0,invsqrta+jnrD+0);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +
 +            /* GENERALIZED BORN AND COULOMB ELECTROSTATICS */
 +            isaprod          = _mm256_mul_pd(isai0,isaj0);
 +            gbqqfactor       = _mm256_xor_pd(signbit,_mm256_mul_pd(qq00,_mm256_mul_pd(isaprod,gbinvepsdiff)));
 +            gbscale          = _mm256_mul_pd(isaprod,gbtabscale);
 +
 +            /* Calculate generalized born table index - this is a separate table from the normal one,
 +             * but we use the same procedure by multiplying r with scale and truncating to integer.
 +             */
 +            rt               = _mm256_mul_pd(r00,gbscale);
 +            gbitab           = _mm256_cvttpd_epi32(rt);
 +            gbeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            gbitab           = _mm_slli_epi32(gbitab,2);
 +            Y                = _mm256_load_pd( gbtab + _mm_extract_epi32(gbitab,0) );
 +            F                = _mm256_load_pd( gbtab + _mm_extract_epi32(gbitab,1) );
 +            G                = _mm256_load_pd( gbtab + _mm_extract_epi32(gbitab,2) );
 +            H                = _mm256_load_pd( gbtab + _mm_extract_epi32(gbitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(gbeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(gbeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(gbeps,Fp));
 +            vgb              = _mm256_mul_pd(gbqqfactor,VV);
 +
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(gbeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fgb              = _mm256_mul_pd(gbqqfactor,_mm256_mul_pd(FF,gbscale));
 +            dvdatmp          = _mm256_mul_pd(minushalf,_mm256_add_pd(vgb,_mm256_mul_pd(fgb,r00)));
 +            dvdasum          = _mm256_add_pd(dvdasum,dvdatmp);
 +            fjptrA           = dvda+jnrA;
 +            fjptrB           = dvda+jnrB;
 +            fjptrC           = dvda+jnrC;
 +            fjptrD           = dvda+jnrD;
 +            gmx_mm256_increment_4real_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                 _mm256_mul_pd(dvdatmp,_mm256_mul_pd(isaj0,isaj0)));
 +            velec            = _mm256_mul_pd(qq00,rinv00);
 +            felec            = _mm256_mul_pd(_mm256_sub_pd(_mm256_mul_pd(velec,rinv00),fgb),rinv00);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +            vgbsum           = _mm256_add_pd(vgbsum,vgb);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
 +
 +            /* Inner loop uses 57 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm256_andnot_pd(mask,val) to clear dummy entries.
 +             */
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            isaj0            = gmx_mm256_load_4real_swizzle_pd(invsqrta+jnrA+0,invsqrta+jnrB+0,
 +                                                                 invsqrta+jnrC+0,invsqrta+jnrD+0);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +
 +            /* GENERALIZED BORN AND COULOMB ELECTROSTATICS */
 +            isaprod          = _mm256_mul_pd(isai0,isaj0);
 +            gbqqfactor       = _mm256_xor_pd(signbit,_mm256_mul_pd(qq00,_mm256_mul_pd(isaprod,gbinvepsdiff)));
 +            gbscale          = _mm256_mul_pd(isaprod,gbtabscale);
 +
 +            /* Calculate generalized born table index - this is a separate table from the normal one,
 +             * but we use the same procedure by multiplying r with scale and truncating to integer.
 +             */
 +            rt               = _mm256_mul_pd(r00,gbscale);
 +            gbitab           = _mm256_cvttpd_epi32(rt);
 +            gbeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            gbitab           = _mm_slli_epi32(gbitab,2);
 +            Y                = _mm256_load_pd( gbtab + _mm_extract_epi32(gbitab,0) );
 +            F                = _mm256_load_pd( gbtab + _mm_extract_epi32(gbitab,1) );
 +            G                = _mm256_load_pd( gbtab + _mm_extract_epi32(gbitab,2) );
 +            H                = _mm256_load_pd( gbtab + _mm_extract_epi32(gbitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(gbeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(gbeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(gbeps,Fp));
 +            vgb              = _mm256_mul_pd(gbqqfactor,VV);
 +
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(gbeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fgb              = _mm256_mul_pd(gbqqfactor,_mm256_mul_pd(FF,gbscale));
 +            dvdatmp          = _mm256_mul_pd(minushalf,_mm256_add_pd(vgb,_mm256_mul_pd(fgb,r00)));
 +            dvdasum          = _mm256_add_pd(dvdasum,dvdatmp);
 +            /* The pointers to scratch make sure that this code with compilers that take gmx_restrict seriously (e.g. icc 13) really can't screw things up. */
 +            fjptrA             = (jnrlistA>=0) ? dvda+jnrA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? dvda+jnrB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? dvda+jnrC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? dvda+jnrD : scratch;
 +            gmx_mm256_increment_4real_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                 _mm256_mul_pd(dvdatmp,_mm256_mul_pd(isaj0,isaj0)));
 +            velec            = _mm256_mul_pd(qq00,rinv00);
 +            felec            = _mm256_mul_pd(_mm256_sub_pd(_mm256_mul_pd(velec,rinv00),fgb),rinv00);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +            vgb              = _mm256_andnot_pd(dummy_mask,vgb);
 +            vgbsum           = _mm256_add_pd(vgbsum,vgb);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
 +
 +            /* Inner loop uses 58 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_1atom_swizzle_pd(fix0,fiy0,fiz0,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
 +        gmx_mm256_update_1pot_pd(vgbsum,kernel_data->energygrp_polarization+ggid);
 +        dvdasum = _mm256_mul_pd(dvdasum, _mm256_mul_pd(isai0,isai0));
 +        gmx_mm256_update_1pot_pd(dvdasum,dvda+inr);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 9 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VF,outeriter*9 + inneriter*58);
 +}
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecGB_VdwNone_GeomP1P1_F_avx_256_double
 + * Electrostatics interaction: GeneralizedBorn
 + * VdW interaction:            None
 + * Geometry:                   Particle-Particle
 + * Calculate force/pot:        Force
 + */
 +void
 +nb_kernel_ElecGB_VdwNone_GeomP1P1_F_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    __m128i          gbitab;
 +    __m256d          vgb,fgb,vgbsum,dvdasum,gbscale,gbtabscale,isaprod,gbqqfactor,gbinvepsdiff,gbeps,dvdatmp;
 +    __m256d          minushalf = _mm256_set1_pd(-0.5);
 +    real             *invsqrta,*dvda,*gbtab;
 +    __m128i          vfitab;
 +    __m128i          ifour       = _mm_set1_epi32(4);
 +    __m256d          rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
 +    real             *vftab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +
 +    invsqrta         = fr->invsqrta;
 +    dvda             = fr->dvda;
 +    gbtabscale       = _mm256_set1_pd(fr->gbtab.scale);
 +    gbtab            = fr->gbtab.data;
 +    gbinvepsdiff     = _mm256_set1_pd((1.0/fr->epsilon_r) - (1.0/fr->gb_epsilon_solvent));
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_1rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +
 +        /* Load parameters for i particles */
 +        iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +        isai0            = _mm256_set1_pd(invsqrta[inr+0]);
 +
 +        dvdasum          = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            isaj0            = gmx_mm256_load_4real_swizzle_pd(invsqrta+jnrA+0,invsqrta+jnrB+0,
 +                                                                 invsqrta+jnrC+0,invsqrta+jnrD+0);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +
 +            /* GENERALIZED BORN AND COULOMB ELECTROSTATICS */
 +            isaprod          = _mm256_mul_pd(isai0,isaj0);
 +            gbqqfactor       = _mm256_xor_pd(signbit,_mm256_mul_pd(qq00,_mm256_mul_pd(isaprod,gbinvepsdiff)));
 +            gbscale          = _mm256_mul_pd(isaprod,gbtabscale);
 +
 +            /* Calculate generalized born table index - this is a separate table from the normal one,
 +             * but we use the same procedure by multiplying r with scale and truncating to integer.
 +             */
 +            rt               = _mm256_mul_pd(r00,gbscale);
 +            gbitab           = _mm256_cvttpd_epi32(rt);
 +            gbeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            gbitab           = _mm_slli_epi32(gbitab,2);
 +            Y                = _mm256_load_pd( gbtab + _mm_extract_epi32(gbitab,0) );
 +            F                = _mm256_load_pd( gbtab + _mm_extract_epi32(gbitab,1) );
 +            G                = _mm256_load_pd( gbtab + _mm_extract_epi32(gbitab,2) );
 +            H                = _mm256_load_pd( gbtab + _mm_extract_epi32(gbitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(gbeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(gbeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(gbeps,Fp));
 +            vgb              = _mm256_mul_pd(gbqqfactor,VV);
 +
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(gbeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fgb              = _mm256_mul_pd(gbqqfactor,_mm256_mul_pd(FF,gbscale));
 +            dvdatmp          = _mm256_mul_pd(minushalf,_mm256_add_pd(vgb,_mm256_mul_pd(fgb,r00)));
 +            dvdasum          = _mm256_add_pd(dvdasum,dvdatmp);
 +            fjptrA           = dvda+jnrA;
 +            fjptrB           = dvda+jnrB;
 +            fjptrC           = dvda+jnrC;
 +            fjptrD           = dvda+jnrD;
 +            gmx_mm256_increment_4real_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                 _mm256_mul_pd(dvdatmp,_mm256_mul_pd(isaj0,isaj0)));
 +            velec            = _mm256_mul_pd(qq00,rinv00);
 +            felec            = _mm256_mul_pd(_mm256_sub_pd(_mm256_mul_pd(velec,rinv00),fgb),rinv00);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
 +
 +            /* Inner loop uses 55 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            isaj0            = gmx_mm256_load_4real_swizzle_pd(invsqrta+jnrA+0,invsqrta+jnrB+0,
 +                                                                 invsqrta+jnrC+0,invsqrta+jnrD+0);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +
 +            /* GENERALIZED BORN AND COULOMB ELECTROSTATICS */
 +            isaprod          = _mm256_mul_pd(isai0,isaj0);
 +            gbqqfactor       = _mm256_xor_pd(signbit,_mm256_mul_pd(qq00,_mm256_mul_pd(isaprod,gbinvepsdiff)));
 +            gbscale          = _mm256_mul_pd(isaprod,gbtabscale);
 +
 +            /* Calculate generalized born table index - this is a separate table from the normal one,
 +             * but we use the same procedure by multiplying r with scale and truncating to integer.
 +             */
 +            rt               = _mm256_mul_pd(r00,gbscale);
 +            gbitab           = _mm256_cvttpd_epi32(rt);
 +            gbeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            gbitab           = _mm_slli_epi32(gbitab,2);
 +            Y                = _mm256_load_pd( gbtab + _mm_extract_epi32(gbitab,0) );
 +            F                = _mm256_load_pd( gbtab + _mm_extract_epi32(gbitab,1) );
 +            G                = _mm256_load_pd( gbtab + _mm_extract_epi32(gbitab,2) );
 +            H                = _mm256_load_pd( gbtab + _mm_extract_epi32(gbitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(gbeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(gbeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(gbeps,Fp));
 +            vgb              = _mm256_mul_pd(gbqqfactor,VV);
 +
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(gbeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fgb              = _mm256_mul_pd(gbqqfactor,_mm256_mul_pd(FF,gbscale));
 +            dvdatmp          = _mm256_mul_pd(minushalf,_mm256_add_pd(vgb,_mm256_mul_pd(fgb,r00)));
 +            dvdasum          = _mm256_add_pd(dvdasum,dvdatmp);
 +            /* The pointers to scratch make sure that this code with compilers that take gmx_restrict seriously (e.g. icc 13) really can't screw things up. */
 +            fjptrA             = (jnrlistA>=0) ? dvda+jnrA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? dvda+jnrB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? dvda+jnrC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? dvda+jnrD : scratch;
 +            gmx_mm256_increment_4real_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                 _mm256_mul_pd(dvdatmp,_mm256_mul_pd(isaj0,isaj0)));
 +            velec            = _mm256_mul_pd(qq00,rinv00);
 +            felec            = _mm256_mul_pd(_mm256_sub_pd(_mm256_mul_pd(velec,rinv00),fgb),rinv00);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
 +
 +            /* Inner loop uses 56 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_1atom_swizzle_pd(fix0,fiy0,fiz0,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        dvdasum = _mm256_mul_pd(dvdasum, _mm256_mul_pd(isai0,isai0));
 +        gmx_mm256_update_1pot_pd(dvdasum,dvda+inr);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 7 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_F,outeriter*7 + inneriter*56);
 +}
index 621a3c6a2ea3b505d1e4f7b95ebbf82b82efedd3,0000000000000000000000000000000000000000..61b04cc81df57fb3ed7cf7402f4e57c206156ca7
mode 100644,000000..100644
--- /dev/null
@@@ -1,745 -1,0 +1,745 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/*
 + * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2001-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the Free
 + * Software Foundation; either version 2 of the License, or (at your option) any
 + * later version.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website.
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecNone_VdwCSTab_GeomP1P1_VF_avx_256_double
 + * Electrostatics interaction: None
 + * VdW interaction:            CubicSplineTable
 + * Geometry:                   Particle-Particle
 + * Calculate force/pot:        PotentialAndForce
 + */
 +void
 +nb_kernel_ElecNone_VdwCSTab_GeomP1P1_VF_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m128i          vfitab;
 +    __m128i          ifour       = _mm_set1_epi32(4);
 +    __m256d          rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
 +    real             *vftab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    vftab            = kernel_data->table_vdw->data;
 +    vftabscale       = _mm256_set1_pd(kernel_data->table_vdw->scale);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_1rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +
 +        /* Load parameters for i particles */
 +        vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +        /* Reset potential sums */
 +        vvdwsum          = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +
 +            /* Load parameters for j particles */
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,3);
 +
 +            /* CUBIC SPLINE TABLE DISPERSION */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw6            = _mm256_mul_pd(c6_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw6            = _mm256_mul_pd(c6_00,FF);
 +
 +            /* CUBIC SPLINE TABLE REPULSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw12           = _mm256_mul_pd(c12_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw12           = _mm256_mul_pd(c12_00,FF);
 +            vvdw             = _mm256_add_pd(vvdw12,vvdw6);
 +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = fvdw;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
 +
 +            /* Inner loop uses 56 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +
 +            /* Load parameters for j particles */
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,3);
 +
 +            /* CUBIC SPLINE TABLE DISPERSION */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw6            = _mm256_mul_pd(c6_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw6            = _mm256_mul_pd(c6_00,FF);
 +
 +            /* CUBIC SPLINE TABLE REPULSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw12           = _mm256_mul_pd(c12_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw12           = _mm256_mul_pd(c12_00,FF);
 +            vvdw             = _mm256_add_pd(vvdw12,vvdw6);
 +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            vvdw             = _mm256_andnot_pd(dummy_mask,vvdw);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = fvdw;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
 +
 +            /* Inner loop uses 57 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_1atom_swizzle_pd(fix0,fiy0,fiz0,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        gmx_mm256_update_1pot_pd(vvdwsum,kernel_data->energygrp_vdw+ggid);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 7 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_VDW_VF,outeriter*7 + inneriter*57);
 +}
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecNone_VdwCSTab_GeomP1P1_F_avx_256_double
 + * Electrostatics interaction: None
 + * VdW interaction:            CubicSplineTable
 + * Geometry:                   Particle-Particle
 + * Calculate force/pot:        Force
 + */
 +void
 +nb_kernel_ElecNone_VdwCSTab_GeomP1P1_F_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m128i          vfitab;
 +    __m128i          ifour       = _mm_set1_epi32(4);
 +    __m256d          rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
 +    real             *vftab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    vftab            = kernel_data->table_vdw->data;
 +    vftabscale       = _mm256_set1_pd(kernel_data->table_vdw->scale);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_1rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +
 +        /* Load parameters for i particles */
 +        vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +
 +            /* Load parameters for j particles */
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,3);
 +
 +            /* CUBIC SPLINE TABLE DISPERSION */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw6            = _mm256_mul_pd(c6_00,FF);
 +
 +            /* CUBIC SPLINE TABLE REPULSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw12           = _mm256_mul_pd(c12_00,FF);
 +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            fscal            = fvdw;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
 +
 +            /* Inner loop uses 48 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +
 +            /* Load parameters for j particles */
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,3);
 +
 +            /* CUBIC SPLINE TABLE DISPERSION */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw6            = _mm256_mul_pd(c6_00,FF);
 +
 +            /* CUBIC SPLINE TABLE REPULSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw12           = _mm256_mul_pd(c12_00,FF);
 +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            fscal            = fvdw;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
 +
 +            /* Inner loop uses 49 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_1atom_swizzle_pd(fix0,fiy0,fiz0,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 6 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_VDW_F,outeriter*6 + inneriter*49);
 +}
index d49028ca98e44f6e4a95582b293d9575f1a1aee6,0000000000000000000000000000000000000000..a1f2f961494d709d0123399ff86114162615ed74
mode 100644,000000..100644
--- /dev/null
@@@ -1,671 -1,0 +1,671 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/*
 + * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2001-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the Free
 + * Software Foundation; either version 2 of the License, or (at your option) any
 + * later version.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website.
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecNone_VdwLJSh_GeomP1P1_VF_avx_256_double
 + * Electrostatics interaction: None
 + * VdW interaction:            LennardJones
 + * Geometry:                   Particle-Particle
 + * Calculate force/pot:        PotentialAndForce
 + */
 +void
 +nb_kernel_ElecNone_VdwLJSh_GeomP1P1_VF_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    rcutoff_scalar   = fr->rvdw;
 +    rcutoff          = _mm256_set1_pd(rcutoff_scalar);
 +    rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);
 +
 +    sh_vdw_invrcut6  = _mm256_set1_pd(fr->ic->sh_invrc6);
 +    rvdw             = _mm256_set1_pd(fr->rvdw);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_1rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +
 +        /* Load parameters for i particles */
 +        vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +        /* Reset potential sums */
 +        vvdwsum          = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +
 +            rinvsq00         = gmx_mm256_inv_pd(rsq00);
 +
 +            /* Load parameters for j particles */
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd(_mm256_mul_pd( _mm256_sub_pd(vvdw12 , _mm256_mul_pd(c12_00,_mm256_mul_pd(sh_vdw_invrcut6,sh_vdw_invrcut6))), one_twelfth) ,
 +                                          _mm256_mul_pd( _mm256_sub_pd(vvdw6,_mm256_mul_pd(c6_00,sh_vdw_invrcut6)),one_sixth));
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            vvdw             = _mm256_and_pd(vvdw,cutoff_mask);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = fvdw;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
 +
 +            }
 +
 +            /* Inner loop uses 41 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +
 +            rinvsq00         = gmx_mm256_inv_pd(rsq00);
 +
 +            /* Load parameters for j particles */
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd(_mm256_mul_pd( _mm256_sub_pd(vvdw12 , _mm256_mul_pd(c12_00,_mm256_mul_pd(sh_vdw_invrcut6,sh_vdw_invrcut6))), one_twelfth) ,
 +                                          _mm256_mul_pd( _mm256_sub_pd(vvdw6,_mm256_mul_pd(c6_00,sh_vdw_invrcut6)),one_sixth));
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            vvdw             = _mm256_and_pd(vvdw,cutoff_mask);
 +            vvdw             = _mm256_andnot_pd(dummy_mask,vvdw);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = fvdw;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
 +
 +            }
 +
 +            /* Inner loop uses 41 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_1atom_swizzle_pd(fix0,fiy0,fiz0,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        gmx_mm256_update_1pot_pd(vvdwsum,kernel_data->energygrp_vdw+ggid);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 7 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_VDW_VF,outeriter*7 + inneriter*41);
 +}
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecNone_VdwLJSh_GeomP1P1_F_avx_256_double
 + * Electrostatics interaction: None
 + * VdW interaction:            LennardJones
 + * Geometry:                   Particle-Particle
 + * Calculate force/pot:        Force
 + */
 +void
 +nb_kernel_ElecNone_VdwLJSh_GeomP1P1_F_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    rcutoff_scalar   = fr->rvdw;
 +    rcutoff          = _mm256_set1_pd(rcutoff_scalar);
 +    rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);
 +
 +    sh_vdw_invrcut6  = _mm256_set1_pd(fr->ic->sh_invrc6);
 +    rvdw             = _mm256_set1_pd(fr->rvdw);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_1rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +
 +        /* Load parameters for i particles */
 +        vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +
 +            rinvsq00         = gmx_mm256_inv_pd(rsq00);
 +
 +            /* Load parameters for j particles */
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(_mm256_mul_pd(c12_00,rinvsix),c6_00),_mm256_mul_pd(rinvsix,rinvsq00));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = fvdw;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
 +
 +            }
 +
 +            /* Inner loop uses 30 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +
 +            rinvsq00         = gmx_mm256_inv_pd(rsq00);
 +
 +            /* Load parameters for j particles */
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(_mm256_mul_pd(c12_00,rinvsix),c6_00),_mm256_mul_pd(rinvsix,rinvsq00));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = fvdw;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
 +
 +            }
 +
 +            /* Inner loop uses 30 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_1atom_swizzle_pd(fix0,fiy0,fiz0,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 6 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_VDW_F,outeriter*6 + inneriter*30);
 +}
index 13a6a80bc1607a3f43198f01029706fd5eb480bc,0000000000000000000000000000000000000000..cfbcdad64cfdcfe5622bea6b66192f89398568b6
mode 100644,000000..100644
--- /dev/null
@@@ -1,757 -1,0 +1,757 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/*
 + * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2001-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the Free
 + * Software Foundation; either version 2 of the License, or (at your option) any
 + * later version.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website.
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecNone_VdwLJSw_GeomP1P1_VF_avx_256_double
 + * Electrostatics interaction: None
 + * VdW interaction:            LennardJones
 + * Geometry:                   Particle-Particle
 + * Calculate force/pot:        PotentialAndForce
 + */
 +void
 +nb_kernel_ElecNone_VdwLJSw_GeomP1P1_VF_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m256d          rswitch,swV3,swV4,swV5,swF2,swF3,swF4,d,d2,sw,dsw;
 +    real             rswitch_scalar,d_scalar;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    rcutoff_scalar   = fr->rvdw;
 +    rcutoff          = _mm256_set1_pd(rcutoff_scalar);
 +    rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);
 +
 +    rswitch_scalar   = fr->rvdw_switch;
 +    rswitch          = _mm256_set1_pd(rswitch_scalar);
 +    /* Setup switch parameters */
 +    d_scalar         = rcutoff_scalar-rswitch_scalar;
 +    d                = _mm256_set1_pd(d_scalar);
 +    swV3             = _mm256_set1_pd(-10.0/(d_scalar*d_scalar*d_scalar));
 +    swV4             = _mm256_set1_pd( 15.0/(d_scalar*d_scalar*d_scalar*d_scalar));
 +    swV5             = _mm256_set1_pd( -6.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
 +    swF2             = _mm256_set1_pd(-30.0/(d_scalar*d_scalar*d_scalar));
 +    swF3             = _mm256_set1_pd( 60.0/(d_scalar*d_scalar*d_scalar*d_scalar));
 +    swF4             = _mm256_set1_pd(-30.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_1rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +
 +        /* Load parameters for i particles */
 +        vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +        /* Reset potential sums */
 +        vvdwsum          = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +
 +            /* Load parameters for j particles */
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            d                = _mm256_sub_pd(r00,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            fvdw             = _mm256_sub_pd( _mm256_mul_pd(fvdw,sw) , _mm256_mul_pd(rinv00,_mm256_mul_pd(vvdw,dsw)) );
 +            vvdw             = _mm256_mul_pd(vvdw,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            vvdw             = _mm256_and_pd(vvdw,cutoff_mask);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = fvdw;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
 +
 +            }
 +
 +            /* Inner loop uses 59 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +
 +            /* Load parameters for j particles */
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            d                = _mm256_sub_pd(r00,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            fvdw             = _mm256_sub_pd( _mm256_mul_pd(fvdw,sw) , _mm256_mul_pd(rinv00,_mm256_mul_pd(vvdw,dsw)) );
 +            vvdw             = _mm256_mul_pd(vvdw,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            vvdw             = _mm256_and_pd(vvdw,cutoff_mask);
 +            vvdw             = _mm256_andnot_pd(dummy_mask,vvdw);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = fvdw;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
 +
 +            }
 +
 +            /* Inner loop uses 60 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_1atom_swizzle_pd(fix0,fiy0,fiz0,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        gmx_mm256_update_1pot_pd(vvdwsum,kernel_data->energygrp_vdw+ggid);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 7 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_VDW_VF,outeriter*7 + inneriter*60);
 +}
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecNone_VdwLJSw_GeomP1P1_F_avx_256_double
 + * Electrostatics interaction: None
 + * VdW interaction:            LennardJones
 + * Geometry:                   Particle-Particle
 + * Calculate force/pot:        Force
 + */
 +void
 +nb_kernel_ElecNone_VdwLJSw_GeomP1P1_F_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m256d          rswitch,swV3,swV4,swV5,swF2,swF3,swF4,d,d2,sw,dsw;
 +    real             rswitch_scalar,d_scalar;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    rcutoff_scalar   = fr->rvdw;
 +    rcutoff          = _mm256_set1_pd(rcutoff_scalar);
 +    rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);
 +
 +    rswitch_scalar   = fr->rvdw_switch;
 +    rswitch          = _mm256_set1_pd(rswitch_scalar);
 +    /* Setup switch parameters */
 +    d_scalar         = rcutoff_scalar-rswitch_scalar;
 +    d                = _mm256_set1_pd(d_scalar);
 +    swV3             = _mm256_set1_pd(-10.0/(d_scalar*d_scalar*d_scalar));
 +    swV4             = _mm256_set1_pd( 15.0/(d_scalar*d_scalar*d_scalar*d_scalar));
 +    swV5             = _mm256_set1_pd( -6.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
 +    swF2             = _mm256_set1_pd(-30.0/(d_scalar*d_scalar*d_scalar));
 +    swF3             = _mm256_set1_pd( 60.0/(d_scalar*d_scalar*d_scalar*d_scalar));
 +    swF4             = _mm256_set1_pd(-30.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_1rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +
 +        /* Load parameters for i particles */
 +        vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +
 +            /* Load parameters for j particles */
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            d                = _mm256_sub_pd(r00,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            fvdw             = _mm256_sub_pd( _mm256_mul_pd(fvdw,sw) , _mm256_mul_pd(rinv00,_mm256_mul_pd(vvdw,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = fvdw;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
 +
 +            }
 +
 +            /* Inner loop uses 56 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +
 +            /* Load parameters for j particles */
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            d                = _mm256_sub_pd(r00,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            fvdw             = _mm256_sub_pd( _mm256_mul_pd(fvdw,sw) , _mm256_mul_pd(rinv00,_mm256_mul_pd(vvdw,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = fvdw;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
 +
 +            }
 +
 +            /* Inner loop uses 57 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_1atom_swizzle_pd(fix0,fiy0,fiz0,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 6 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_VDW_F,outeriter*6 + inneriter*57);
 +}
index 979835935eebeb3f85846452260838c44e203f33,0000000000000000000000000000000000000000..f4395ddcaa3757fcdaa54e20c263c4e07bf14f89
mode 100644,000000..100644
--- /dev/null
@@@ -1,617 -1,0 +1,617 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/*
 + * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2001-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the Free
 + * Software Foundation; either version 2 of the License, or (at your option) any
 + * later version.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website.
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecNone_VdwLJ_GeomP1P1_VF_avx_256_double
 + * Electrostatics interaction: None
 + * VdW interaction:            LennardJones
 + * Geometry:                   Particle-Particle
 + * Calculate force/pot:        PotentialAndForce
 + */
 +void
 +nb_kernel_ElecNone_VdwLJ_GeomP1P1_VF_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_1rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +
 +        /* Load parameters for i particles */
 +        vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +        /* Reset potential sums */
 +        vvdwsum          = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +
 +            rinvsq00         = gmx_mm256_inv_pd(rsq00);
 +
 +            /* Load parameters for j particles */
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = fvdw;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
 +
 +            /* Inner loop uses 32 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +
 +            rinvsq00         = gmx_mm256_inv_pd(rsq00);
 +
 +            /* Load parameters for j particles */
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            vvdw             = _mm256_andnot_pd(dummy_mask,vvdw);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = fvdw;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
 +
 +            /* Inner loop uses 32 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_1atom_swizzle_pd(fix0,fiy0,fiz0,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        gmx_mm256_update_1pot_pd(vvdwsum,kernel_data->energygrp_vdw+ggid);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 7 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_VDW_VF,outeriter*7 + inneriter*32);
 +}
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecNone_VdwLJ_GeomP1P1_F_avx_256_double
 + * Electrostatics interaction: None
 + * VdW interaction:            LennardJones
 + * Geometry:                   Particle-Particle
 + * Calculate force/pot:        Force
 + */
 +void
 +nb_kernel_ElecNone_VdwLJ_GeomP1P1_F_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_1rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +
 +        /* Load parameters for i particles */
 +        vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +
 +            rinvsq00         = gmx_mm256_inv_pd(rsq00);
 +
 +            /* Load parameters for j particles */
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(_mm256_mul_pd(c12_00,rinvsix),c6_00),_mm256_mul_pd(rinvsix,rinvsq00));
 +
 +            fscal            = fvdw;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
 +
 +            /* Inner loop uses 27 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +
 +            rinvsq00         = gmx_mm256_inv_pd(rsq00);
 +
 +            /* Load parameters for j particles */
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(_mm256_mul_pd(c12_00,rinvsix),c6_00),_mm256_mul_pd(rinvsix,rinvsq00));
 +
 +            fscal            = fvdw;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
 +
 +            /* Inner loop uses 27 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_1atom_swizzle_pd(fix0,fiy0,fiz0,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 6 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_VDW_F,outeriter*6 + inneriter*27);
 +}
index 3d72e2377f328dd4d07037568eabd9cfed0f8f49,0000000000000000000000000000000000000000..1e41148ecc85edf90421304cc2c3d798b1f8af10
mode 100644,000000..100644
--- /dev/null
@@@ -1,850 -1,0 +1,850 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/*
 + * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2001-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the Free
 + * Software Foundation; either version 2 of the License, or (at your option) any
 + * later version.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website.
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecRFCut_VdwCSTab_GeomP1P1_VF_avx_256_double
 + * Electrostatics interaction: ReactionField
 + * VdW interaction:            CubicSplineTable
 + * Geometry:                   Particle-Particle
 + * Calculate force/pot:        PotentialAndForce
 + */
 +void
 +nb_kernel_ElecRFCut_VdwCSTab_GeomP1P1_VF_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m128i          vfitab;
 +    __m128i          ifour       = _mm_set1_epi32(4);
 +    __m256d          rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
 +    real             *vftab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    krf              = _mm256_set1_pd(fr->ic->k_rf);
 +    krf2             = _mm256_set1_pd(fr->ic->k_rf*2.0);
 +    crf              = _mm256_set1_pd(fr->ic->c_rf);
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    vftab            = kernel_data->table_vdw->data;
 +    vftabscale       = _mm256_set1_pd(kernel_data->table_vdw->scale);
 +
 +    /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
 +    rcutoff_scalar   = fr->rcoulomb;
 +    rcutoff          = _mm256_set1_pd(rcutoff_scalar);
 +    rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_1rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +
 +        /* Load parameters for i particles */
 +        iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +        vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +        /* Reset potential sums */
 +        velecsum         = _mm256_setzero_pd();
 +        vvdwsum          = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,3);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_add_pd(rinv00,_mm256_mul_pd(krf,rsq00)),crf));
 +            felec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_mul_pd(rinv00,rinvsq00),krf2));
 +
 +            /* CUBIC SPLINE TABLE DISPERSION */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw6            = _mm256_mul_pd(c6_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw6            = _mm256_mul_pd(c6_00,FF);
 +
 +            /* CUBIC SPLINE TABLE REPULSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw12           = _mm256_mul_pd(c12_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw12           = _mm256_mul_pd(c12_00,FF);
 +            vvdw             = _mm256_add_pd(vvdw12,vvdw6);
 +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +            vvdw             = _mm256_and_pd(vvdw,cutoff_mask);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
 +
 +            }
 +
 +            /* Inner loop uses 72 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,3);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_add_pd(rinv00,_mm256_mul_pd(krf,rsq00)),crf));
 +            felec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_mul_pd(rinv00,rinvsq00),krf2));
 +
 +            /* CUBIC SPLINE TABLE DISPERSION */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw6            = _mm256_mul_pd(c6_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw6            = _mm256_mul_pd(c6_00,FF);
 +
 +            /* CUBIC SPLINE TABLE REPULSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw12           = _mm256_mul_pd(c12_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw12           = _mm256_mul_pd(c12_00,FF);
 +            vvdw             = _mm256_add_pd(vvdw12,vvdw6);
 +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +            vvdw             = _mm256_and_pd(vvdw,cutoff_mask);
 +            vvdw             = _mm256_andnot_pd(dummy_mask,vvdw);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
 +
 +            }
 +
 +            /* Inner loop uses 73 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_1atom_swizzle_pd(fix0,fiy0,fiz0,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
 +        gmx_mm256_update_1pot_pd(vvdwsum,kernel_data->energygrp_vdw+ggid);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 9 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_VF,outeriter*9 + inneriter*73);
 +}
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecRFCut_VdwCSTab_GeomP1P1_F_avx_256_double
 + * Electrostatics interaction: ReactionField
 + * VdW interaction:            CubicSplineTable
 + * Geometry:                   Particle-Particle
 + * Calculate force/pot:        Force
 + */
 +void
 +nb_kernel_ElecRFCut_VdwCSTab_GeomP1P1_F_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m128i          vfitab;
 +    __m128i          ifour       = _mm_set1_epi32(4);
 +    __m256d          rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
 +    real             *vftab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    krf              = _mm256_set1_pd(fr->ic->k_rf);
 +    krf2             = _mm256_set1_pd(fr->ic->k_rf*2.0);
 +    crf              = _mm256_set1_pd(fr->ic->c_rf);
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    vftab            = kernel_data->table_vdw->data;
 +    vftabscale       = _mm256_set1_pd(kernel_data->table_vdw->scale);
 +
 +    /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
 +    rcutoff_scalar   = fr->rcoulomb;
 +    rcutoff          = _mm256_set1_pd(rcutoff_scalar);
 +    rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    /* Zero the scratch buffer that absorbs force writes for padded (dummy) j entries */
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_1rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +
 +        /* Load parameters for i particles */
 +        iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +        vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +        /* Start inner kernel loop: handles full groups of 4 real j atoms;
 +         * the remainder (padded with negative indices) is handled below.
 +         */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            /* <<3: 8 doubles per table point - 4 for dispersion, 4 for repulsion (offset by ifour below) */
 +            vfitab           = _mm_slli_epi32(vfitab,3);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_mul_pd(rinv00,rinvsq00),krf2));
 +
 +            /* CUBIC SPLINE TABLE DISPERSION */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw6            = _mm256_mul_pd(c6_00,FF);
 +
 +            /* CUBIC SPLINE TABLE REPULSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw12           = _mm256_mul_pd(c12_00,FF);
 +            /* xor with signbit negates: FF is presumably dV/dr of the spline, force is -dV/dr */
 +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            /* Zero force contributions beyond the cutoff */
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
 +
 +            }
 +
 +            /* Inner loop uses 57 flops */
 +        }
 +
 +        /* Remainder loop: last (partial) group of up to 4 j atoms, padded with negative indices */
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm256_andnot_pd(mask,val) to clear dummy entries.
 +             */
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            /* Expand the four 32-bit lanes to four 64-bit lanes to match __m256d */
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            /* Clamp dummy indices to 0 so loads below stay in bounds */
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            /* Zero r for dummy entries so the table index derived from it stays in range */
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            /* <<3: 8 doubles per table point - 4 for dispersion, 4 for repulsion (offset by ifour below) */
 +            vfitab           = _mm_slli_epi32(vfitab,3);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_mul_pd(rinv00,rinvsq00),krf2));
 +
 +            /* CUBIC SPLINE TABLE DISPERSION */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw6            = _mm256_mul_pd(c6_00,FF);
 +
 +            /* CUBIC SPLINE TABLE REPULSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw12           = _mm256_mul_pd(c12_00,FF);
 +            /* xor with signbit negates: FF is presumably dV/dr of the spline, force is -dV/dr */
 +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Clear force lanes belonging to dummy (padding) entries */
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            /* Dummy entries write their (zeroed) force to the scratch buffer, not the force array */
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
 +
 +            }
 +
 +            /* Inner loop uses 58 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_1atom_swizzle_pd(fix0,fiy0,fiz0,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 7 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_F,outeriter*7 + inneriter*58);
 +}
index 2692ae11bbd623bbcdb40727f04ba547c0cdfd79,0000000000000000000000000000000000000000..dd9712d5b757957030c3324031a3e8fb5c4b13b6
mode 100644,000000..100644
--- /dev/null
@@@ -1,1276 -1,0 +1,1276 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/*
 + * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2001-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the Free
 + * Software Foundation; either version 2 of the License, or (at your option) any
 + * later version.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website.
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecRFCut_VdwCSTab_GeomW3P1_VF_avx_256_double
 + * Electrostatics interaction: ReactionField
 + * VdW interaction:            CubicSplineTable
 + * Geometry:                   Water3-Particle
 + * Calculate force/pot:        PotentialAndForce
 + */
 +void
 +nb_kernel_ElecRFCut_VdwCSTab_GeomW3P1_VF_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m128i          vfitab;
 +    __m128i          ifour       = _mm_set1_epi32(4);
 +    __m256d          rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
 +    real             *vftab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    krf              = _mm256_set1_pd(fr->ic->k_rf);
 +    krf2             = _mm256_set1_pd(fr->ic->k_rf*2.0);
 +    crf              = _mm256_set1_pd(fr->ic->c_rf);
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    vftab            = kernel_data->table_vdw->data;
 +    vftabscale       = _mm256_set1_pd(kernel_data->table_vdw->scale);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +    /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
 +    rcutoff_scalar   = fr->rcoulomb;
 +    rcutoff          = _mm256_set1_pd(rcutoff_scalar);
 +    rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +
 +        /* Reset potential sums */
 +        velecsum         = _mm256_setzero_pd();
 +        vvdwsum          = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,3);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_add_pd(rinv00,_mm256_mul_pd(krf,rsq00)),crf));
 +            felec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_mul_pd(rinv00,rinvsq00),krf2));
 +
 +            /* CUBIC SPLINE TABLE DISPERSION */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw6            = _mm256_mul_pd(c6_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw6            = _mm256_mul_pd(c6_00,FF);
 +
 +            /* CUBIC SPLINE TABLE REPULSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw12           = _mm256_mul_pd(c12_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw12           = _mm256_mul_pd(c12_00,FF);
 +            vvdw             = _mm256_add_pd(vvdw12,vvdw6);
 +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +            vvdw             = _mm256_and_pd(vvdw,cutoff_mask);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq10,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_add_pd(rinv10,_mm256_mul_pd(krf,rsq10)),crf));
 +            felec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_mul_pd(rinv10,rinvsq10),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq20,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_add_pd(rinv20,_mm256_mul_pd(krf,rsq20)),crf));
 +            felec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_mul_pd(rinv20,rinvsq20),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 147 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,3);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_add_pd(rinv00,_mm256_mul_pd(krf,rsq00)),crf));
 +            felec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_mul_pd(rinv00,rinvsq00),krf2));
 +
 +            /* CUBIC SPLINE TABLE DISPERSION */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw6            = _mm256_mul_pd(c6_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw6            = _mm256_mul_pd(c6_00,FF);
 +
 +            /* CUBIC SPLINE TABLE REPULSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw12           = _mm256_mul_pd(c12_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw12           = _mm256_mul_pd(c12_00,FF);
 +            vvdw             = _mm256_add_pd(vvdw12,vvdw6);
 +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +            vvdw             = _mm256_and_pd(vvdw,cutoff_mask);
 +            vvdw             = _mm256_andnot_pd(dummy_mask,vvdw);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq10,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_add_pd(rinv10,_mm256_mul_pd(krf,rsq10)),crf));
 +            felec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_mul_pd(rinv10,rinvsq10),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq20,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_add_pd(rinv20,_mm256_mul_pd(krf,rsq20)),crf));
 +            felec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_mul_pd(rinv20,rinvsq20),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 148 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
 +        gmx_mm256_update_1pot_pd(vvdwsum,kernel_data->energygrp_vdw+ggid);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 20 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3_VF,outeriter*20 + inneriter*148);
 +}
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecRFCut_VdwCSTab_GeomW3P1_F_avx_256_double
 + * Electrostatics interaction: ReactionField
 + * VdW interaction:            CubicSplineTable
 + * Geometry:                   Water3-Particle
 + * Calculate force/pot:        Force
 + */
 +void
 +nb_kernel_ElecRFCut_VdwCSTab_GeomW3P1_F_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m128i          vfitab;
 +    __m128i          ifour       = _mm_set1_epi32(4);
 +    __m256d          rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
 +    real             *vftab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    krf              = _mm256_set1_pd(fr->ic->k_rf);
 +    krf2             = _mm256_set1_pd(fr->ic->k_rf*2.0);
 +    crf              = _mm256_set1_pd(fr->ic->c_rf);
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    vftab            = kernel_data->table_vdw->data;
 +    vftabscale       = _mm256_set1_pd(kernel_data->table_vdw->scale);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +    /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
 +    rcutoff_scalar   = fr->rcoulomb;
 +    rcutoff          = _mm256_set1_pd(rcutoff_scalar);
 +    rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,3);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_mul_pd(rinv00,rinvsq00),krf2));
 +
 +            /* CUBIC SPLINE TABLE DISPERSION */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw6            = _mm256_mul_pd(c6_00,FF);
 +
 +            /* CUBIC SPLINE TABLE REPULSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw12           = _mm256_mul_pd(c12_00,FF);
 +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq10,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_mul_pd(rinv10,rinvsq10),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq20,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_mul_pd(rinv20,rinvsq20),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 120 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,3);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_mul_pd(rinv00,rinvsq00),krf2));
 +
 +            /* CUBIC SPLINE TABLE DISPERSION */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw6            = _mm256_mul_pd(c6_00,FF);
 +
 +            /* CUBIC SPLINE TABLE REPULSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw12           = _mm256_mul_pd(c12_00,FF);
 +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq10,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_mul_pd(rinv10,rinvsq10),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq20,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_mul_pd(rinv20,rinvsq20),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 121 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 18 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3_F,outeriter*18 + inneriter*121);
 +}
index f853fa8828b13f23823aa7b4990ea1ad367dac67,0000000000000000000000000000000000000000..c9bc102ef9538507a6216d3353af986ccc614784
mode 100644,000000..100644
--- /dev/null
@@@ -1,2270 -1,0 +1,2270 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/*
 + * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2001-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the Free
 + * Software Foundation; either version 2 of the License, or (at your option) any
 + * later version.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website.
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecRFCut_VdwCSTab_GeomW3W3_VF_avx_256_double
 + * Electrostatics interaction: ReactionField
 + * VdW interaction:            CubicSplineTable
 + * Geometry:                   Water3-Water3
 + * Calculate force/pot:        PotentialAndForce
 + */
 +void
 +nb_kernel_ElecRFCut_VdwCSTab_GeomW3W3_VF_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
 +    __m256d          jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
 +    int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
 +    __m256d          jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
 +    __m256d          dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
 +    __m256d          dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
 +    __m256d          dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m128i          vfitab;
 +    __m128i          ifour       = _mm_set1_epi32(4);
 +    __m256d          rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
 +    real             *vftab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    krf              = _mm256_set1_pd(fr->ic->k_rf);
 +    krf2             = _mm256_set1_pd(fr->ic->k_rf*2.0);
 +    crf              = _mm256_set1_pd(fr->ic->c_rf);
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    vftab            = kernel_data->table_vdw->data;
 +    vftabscale       = _mm256_set1_pd(kernel_data->table_vdw->scale);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +    jq0              = _mm256_set1_pd(charge[inr+0]);
 +    jq1              = _mm256_set1_pd(charge[inr+1]);
 +    jq2              = _mm256_set1_pd(charge[inr+2]);
 +    vdwjidx0A        = 2*vdwtype[inr+0];
 +    qq00             = _mm256_mul_pd(iq0,jq0);
 +    c6_00            = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A]);
 +    c12_00           = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A+1]);
 +    qq01             = _mm256_mul_pd(iq0,jq1);
 +    qq02             = _mm256_mul_pd(iq0,jq2);
 +    qq10             = _mm256_mul_pd(iq1,jq0);
 +    qq11             = _mm256_mul_pd(iq1,jq1);
 +    qq12             = _mm256_mul_pd(iq1,jq2);
 +    qq20             = _mm256_mul_pd(iq2,jq0);
 +    qq21             = _mm256_mul_pd(iq2,jq1);
 +    qq22             = _mm256_mul_pd(iq2,jq2);
 +
 +    /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
 +    rcutoff_scalar   = fr->rcoulomb;
 +    rcutoff          = _mm256_set1_pd(rcutoff_scalar);
 +    rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +
 +        /* Reset potential sums */
 +        velecsum         = _mm256_setzero_pd();
 +        vvdwsum          = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                              &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx01             = _mm256_sub_pd(ix0,jx1);
 +            dy01             = _mm256_sub_pd(iy0,jy1);
 +            dz01             = _mm256_sub_pd(iz0,jz1);
 +            dx02             = _mm256_sub_pd(ix0,jx2);
 +            dy02             = _mm256_sub_pd(iy0,jy2);
 +            dz02             = _mm256_sub_pd(iz0,jz2);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
 +            rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv01           = gmx_mm256_invsqrt_pd(rsq01);
 +            rinv02           = gmx_mm256_invsqrt_pd(rsq02);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq01         = _mm256_mul_pd(rinv01,rinv01);
 +            rinvsq02         = _mm256_mul_pd(rinv02,rinv02);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,3);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_add_pd(rinv00,_mm256_mul_pd(krf,rsq00)),crf));
 +            felec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_mul_pd(rinv00,rinvsq00),krf2));
 +
 +            /* CUBIC SPLINE TABLE DISPERSION */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw6            = _mm256_mul_pd(c6_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw6            = _mm256_mul_pd(c6_00,FF);
 +
 +            /* CUBIC SPLINE TABLE REPULSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw12           = _mm256_mul_pd(c12_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw12           = _mm256_mul_pd(c12_00,FF);
 +            vvdw             = _mm256_add_pd(vvdw12,vvdw6);
 +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +            vvdw             = _mm256_and_pd(vvdw,cutoff_mask);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq01,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq01,_mm256_sub_pd(_mm256_add_pd(rinv01,_mm256_mul_pd(krf,rsq01)),crf));
 +            felec            = _mm256_mul_pd(qq01,_mm256_sub_pd(_mm256_mul_pd(rinv01,rinvsq01),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq01,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx01);
 +            ty               = _mm256_mul_pd(fscal,dy01);
 +            tz               = _mm256_mul_pd(fscal,dz01);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq02,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq02,_mm256_sub_pd(_mm256_add_pd(rinv02,_mm256_mul_pd(krf,rsq02)),crf));
 +            felec            = _mm256_mul_pd(qq02,_mm256_sub_pd(_mm256_mul_pd(rinv02,rinvsq02),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq02,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx02);
 +            ty               = _mm256_mul_pd(fscal,dy02);
 +            tz               = _mm256_mul_pd(fscal,dz02);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq10,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_add_pd(rinv10,_mm256_mul_pd(krf,rsq10)),crf));
 +            felec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_mul_pd(rinv10,rinvsq10),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq11,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_add_pd(rinv11,_mm256_mul_pd(krf,rsq11)),crf));
 +            felec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_mul_pd(rinv11,rinvsq11),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq12,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_add_pd(rinv12,_mm256_mul_pd(krf,rsq12)),crf));
 +            felec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_mul_pd(rinv12,rinvsq12),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq20,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_add_pd(rinv20,_mm256_mul_pd(krf,rsq20)),crf));
 +            felec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_mul_pd(rinv20,rinvsq20),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq21,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_add_pd(rinv21,_mm256_mul_pd(krf,rsq21)),crf));
 +            felec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_mul_pd(rinv21,rinvsq21),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq22,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_add_pd(rinv22,_mm256_mul_pd(krf,rsq22)),crf));
 +            felec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_mul_pd(rinv22,rinvsq22),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
 +
 +            /* Inner loop uses 360 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                              &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx01             = _mm256_sub_pd(ix0,jx1);
 +            dy01             = _mm256_sub_pd(iy0,jy1);
 +            dz01             = _mm256_sub_pd(iz0,jz1);
 +            dx02             = _mm256_sub_pd(ix0,jx2);
 +            dy02             = _mm256_sub_pd(iy0,jy2);
 +            dz02             = _mm256_sub_pd(iz0,jz2);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
 +            rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv01           = gmx_mm256_invsqrt_pd(rsq01);
 +            rinv02           = gmx_mm256_invsqrt_pd(rsq02);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq01         = _mm256_mul_pd(rinv01,rinv01);
 +            rinvsq02         = _mm256_mul_pd(rinv02,rinv02);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,3);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_add_pd(rinv00,_mm256_mul_pd(krf,rsq00)),crf));
 +            felec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_mul_pd(rinv00,rinvsq00),krf2));
 +
 +            /* CUBIC SPLINE TABLE DISPERSION */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw6            = _mm256_mul_pd(c6_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw6            = _mm256_mul_pd(c6_00,FF);
 +
 +            /* CUBIC SPLINE TABLE REPULSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw12           = _mm256_mul_pd(c12_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw12           = _mm256_mul_pd(c12_00,FF);
 +            vvdw             = _mm256_add_pd(vvdw12,vvdw6);
 +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +            vvdw             = _mm256_and_pd(vvdw,cutoff_mask);
 +            vvdw             = _mm256_andnot_pd(dummy_mask,vvdw);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq01,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq01,_mm256_sub_pd(_mm256_add_pd(rinv01,_mm256_mul_pd(krf,rsq01)),crf));
 +            felec            = _mm256_mul_pd(qq01,_mm256_sub_pd(_mm256_mul_pd(rinv01,rinvsq01),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq01,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx01);
 +            ty               = _mm256_mul_pd(fscal,dy01);
 +            tz               = _mm256_mul_pd(fscal,dz01);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq02,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq02,_mm256_sub_pd(_mm256_add_pd(rinv02,_mm256_mul_pd(krf,rsq02)),crf));
 +            felec            = _mm256_mul_pd(qq02,_mm256_sub_pd(_mm256_mul_pd(rinv02,rinvsq02),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq02,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx02);
 +            ty               = _mm256_mul_pd(fscal,dy02);
 +            tz               = _mm256_mul_pd(fscal,dz02);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq10,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_add_pd(rinv10,_mm256_mul_pd(krf,rsq10)),crf));
 +            felec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_mul_pd(rinv10,rinvsq10),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq11,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_add_pd(rinv11,_mm256_mul_pd(krf,rsq11)),crf));
 +            felec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_mul_pd(rinv11,rinvsq11),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq12,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_add_pd(rinv12,_mm256_mul_pd(krf,rsq12)),crf));
 +            felec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_mul_pd(rinv12,rinvsq12),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq20,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_add_pd(rinv20,_mm256_mul_pd(krf,rsq20)),crf));
 +            felec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_mul_pd(rinv20,rinvsq20),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq21,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_add_pd(rinv21,_mm256_mul_pd(krf,rsq21)),crf));
 +            felec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_mul_pd(rinv21,rinvsq21),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq22,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_add_pd(rinv22,_mm256_mul_pd(krf,rsq22)),crf));
 +            felec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_mul_pd(rinv22,rinvsq22),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
 +
 +            /* Inner loop uses 361 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
 +        gmx_mm256_update_1pot_pd(vvdwsum,kernel_data->energygrp_vdw+ggid);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 20 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3W3_VF,outeriter*20 + inneriter*361);
 +}
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecRFCut_VdwCSTab_GeomW3W3_F_avx_256_double
 + * Electrostatics interaction: ReactionField
 + * VdW interaction:            CubicSplineTable
 + * Geometry:                   Water3-Water3
 + * Calculate force/pot:        Force
 + */
 +void
 +nb_kernel_ElecRFCut_VdwCSTab_GeomW3W3_F_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
 +    __m256d          jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
 +    int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
 +    __m256d          jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
 +    __m256d          dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
 +    __m256d          dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
 +    __m256d          dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m128i          vfitab;
 +    __m128i          ifour       = _mm_set1_epi32(4);
 +    __m256d          rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
 +    real             *vftab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    krf              = _mm256_set1_pd(fr->ic->k_rf);
 +    krf2             = _mm256_set1_pd(fr->ic->k_rf*2.0);
 +    crf              = _mm256_set1_pd(fr->ic->c_rf);
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    vftab            = kernel_data->table_vdw->data;
 +    vftabscale       = _mm256_set1_pd(kernel_data->table_vdw->scale);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +    jq0              = _mm256_set1_pd(charge[inr+0]);
 +    jq1              = _mm256_set1_pd(charge[inr+1]);
 +    jq2              = _mm256_set1_pd(charge[inr+2]);
 +    vdwjidx0A        = 2*vdwtype[inr+0];
 +    qq00             = _mm256_mul_pd(iq0,jq0);
 +    c6_00            = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A]);
 +    c12_00           = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A+1]);
 +    qq01             = _mm256_mul_pd(iq0,jq1);
 +    qq02             = _mm256_mul_pd(iq0,jq2);
 +    qq10             = _mm256_mul_pd(iq1,jq0);
 +    qq11             = _mm256_mul_pd(iq1,jq1);
 +    qq12             = _mm256_mul_pd(iq1,jq2);
 +    qq20             = _mm256_mul_pd(iq2,jq0);
 +    qq21             = _mm256_mul_pd(iq2,jq1);
 +    qq22             = _mm256_mul_pd(iq2,jq2);
 +
 +    /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
 +    rcutoff_scalar   = fr->rcoulomb;
 +    rcutoff          = _mm256_set1_pd(rcutoff_scalar);
 +    rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                              &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx01             = _mm256_sub_pd(ix0,jx1);
 +            dy01             = _mm256_sub_pd(iy0,jy1);
 +            dz01             = _mm256_sub_pd(iz0,jz1);
 +            dx02             = _mm256_sub_pd(ix0,jx2);
 +            dy02             = _mm256_sub_pd(iy0,jy2);
 +            dz02             = _mm256_sub_pd(iz0,jz2);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
 +            rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv01           = gmx_mm256_invsqrt_pd(rsq01);
 +            rinv02           = gmx_mm256_invsqrt_pd(rsq02);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq01         = _mm256_mul_pd(rinv01,rinv01);
 +            rinvsq02         = _mm256_mul_pd(rinv02,rinv02);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,3);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_mul_pd(rinv00,rinvsq00),krf2));
 +
 +            /* CUBIC SPLINE TABLE DISPERSION */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw6            = _mm256_mul_pd(c6_00,FF);
 +
 +            /* CUBIC SPLINE TABLE REPULSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw12           = _mm256_mul_pd(c12_00,FF);
 +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq01,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq01,_mm256_sub_pd(_mm256_mul_pd(rinv01,rinvsq01),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq01,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx01);
 +            ty               = _mm256_mul_pd(fscal,dy01);
 +            tz               = _mm256_mul_pd(fscal,dz01);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq02,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq02,_mm256_sub_pd(_mm256_mul_pd(rinv02,rinvsq02),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq02,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx02);
 +            ty               = _mm256_mul_pd(fscal,dy02);
 +            tz               = _mm256_mul_pd(fscal,dz02);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq10,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_mul_pd(rinv10,rinvsq10),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq11,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_mul_pd(rinv11,rinvsq11),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq12,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_mul_pd(rinv12,rinvsq12),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq20,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_mul_pd(rinv20,rinvsq20),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq21,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_mul_pd(rinv21,rinvsq21),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq22,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_mul_pd(rinv22,rinvsq22),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
 +
 +            /* Inner loop uses 297 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                              &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx01             = _mm256_sub_pd(ix0,jx1);
 +            dy01             = _mm256_sub_pd(iy0,jy1);
 +            dz01             = _mm256_sub_pd(iz0,jz1);
 +            dx02             = _mm256_sub_pd(ix0,jx2);
 +            dy02             = _mm256_sub_pd(iy0,jy2);
 +            dz02             = _mm256_sub_pd(iz0,jz2);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
 +            rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv01           = gmx_mm256_invsqrt_pd(rsq01);
 +            rinv02           = gmx_mm256_invsqrt_pd(rsq02);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq01         = _mm256_mul_pd(rinv01,rinv01);
 +            rinvsq02         = _mm256_mul_pd(rinv02,rinv02);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,3);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_mul_pd(rinv00,rinvsq00),krf2));
 +
 +            /* CUBIC SPLINE TABLE DISPERSION */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw6            = _mm256_mul_pd(c6_00,FF);
 +
 +            /* CUBIC SPLINE TABLE REPULSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw12           = _mm256_mul_pd(c12_00,FF);
 +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq01,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq01,_mm256_sub_pd(_mm256_mul_pd(rinv01,rinvsq01),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq01,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx01);
 +            ty               = _mm256_mul_pd(fscal,dy01);
 +            tz               = _mm256_mul_pd(fscal,dz01);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq02,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq02,_mm256_sub_pd(_mm256_mul_pd(rinv02,rinvsq02),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq02,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx02);
 +            ty               = _mm256_mul_pd(fscal,dy02);
 +            tz               = _mm256_mul_pd(fscal,dz02);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq10,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_mul_pd(rinv10,rinvsq10),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq11,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_mul_pd(rinv11,rinvsq11),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq12,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_mul_pd(rinv12,rinvsq12),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq20,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_mul_pd(rinv20,rinvsq20),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq21,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_mul_pd(rinv21,rinvsq21),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq22,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_mul_pd(rinv22,rinvsq22),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
 +
 +            /* Inner loop uses 298 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 18 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3W3_F,outeriter*18 + inneriter*298);
 +}
index 900f85d01bf149e5bf427d67007f2b8c1a5757f7,0000000000000000000000000000000000000000..acfceb4014c20f5bd0e7a0e3dc7333da0447d13f
mode 100644,000000..100644
--- /dev/null
@@@ -1,1402 -1,0 +1,1402 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/*
 + * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2001-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the Free
 + * Software Foundation; either version 2 of the License, or (at your option) any
 + * later version.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website.
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecRFCut_VdwCSTab_GeomW4P1_VF_avx_256_double
 + * Electrostatics interaction: ReactionField
 + * VdW interaction:            CubicSplineTable
 + * Geometry:                   Water4-Particle
 + * Calculate force/pot:        PotentialAndForce
 + */
 +void
 +nb_kernel_ElecRFCut_VdwCSTab_GeomW4P1_VF_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    real *           vdwioffsetptr3;
 +    __m256d          ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          dx30,dy30,dz30,rsq30,rinv30,rinvsq30,r30,qq30,c6_30,c12_30;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m128i          vfitab;
 +    __m128i          ifour       = _mm_set1_epi32(4);
 +    __m256d          rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
 +    real             *vftab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    krf              = _mm256_set1_pd(fr->ic->k_rf);
 +    krf2             = _mm256_set1_pd(fr->ic->k_rf*2.0);
 +    crf              = _mm256_set1_pd(fr->ic->c_rf);
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    vftab            = kernel_data->table_vdw->data;
 +    vftabscale       = _mm256_set1_pd(kernel_data->table_vdw->scale);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    iq3              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+3]));
 +    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +    /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
 +    rcutoff_scalar   = fr->rcoulomb;
 +    rcutoff          = _mm256_set1_pd(rcutoff_scalar);
 +    rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_4rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +        fix3             = _mm256_setzero_pd();
 +        fiy3             = _mm256_setzero_pd();
 +        fiz3             = _mm256_setzero_pd();
 +
 +        /* Reset potential sums */
 +        velecsum         = _mm256_setzero_pd();
 +        vvdwsum          = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx30             = _mm256_sub_pd(ix3,jx0);
 +            dy30             = _mm256_sub_pd(iy3,jy0);
 +            dz30             = _mm256_sub_pd(iz3,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq30            = gmx_mm256_calc_rsq_pd(dx30,dy30,dz30);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv30           = gmx_mm256_invsqrt_pd(rsq30);
 +
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq30         = _mm256_mul_pd(rinv30,rinv30);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,3);
 +
 +            /* CUBIC SPLINE TABLE DISPERSION */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw6            = _mm256_mul_pd(c6_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw6            = _mm256_mul_pd(c6_00,FF);
 +
 +            /* CUBIC SPLINE TABLE REPULSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw12           = _mm256_mul_pd(c12_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw12           = _mm256_mul_pd(c12_00,FF);
 +            vvdw             = _mm256_add_pd(vvdw12,vvdw6);
 +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = fvdw;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq10,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_add_pd(rinv10,_mm256_mul_pd(krf,rsq10)),crf));
 +            felec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_mul_pd(rinv10,rinvsq10),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq20,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_add_pd(rinv20,_mm256_mul_pd(krf,rsq20)),crf));
 +            felec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_mul_pd(rinv20,rinvsq20),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq30,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq30             = _mm256_mul_pd(iq3,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq30,_mm256_sub_pd(_mm256_add_pd(rinv30,_mm256_mul_pd(krf,rsq30)),crf));
 +            felec            = _mm256_mul_pd(qq30,_mm256_sub_pd(_mm256_mul_pd(rinv30,rinvsq30),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq30,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx30);
 +            ty               = _mm256_mul_pd(fscal,dy30);
 +            tz               = _mm256_mul_pd(fscal,dz30);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 167 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx30             = _mm256_sub_pd(ix3,jx0);
 +            dy30             = _mm256_sub_pd(iy3,jy0);
 +            dz30             = _mm256_sub_pd(iz3,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq30            = gmx_mm256_calc_rsq_pd(dx30,dy30,dz30);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv30           = gmx_mm256_invsqrt_pd(rsq30);
 +
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq30         = _mm256_mul_pd(rinv30,rinv30);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,3);
 +
 +            /* CUBIC SPLINE TABLE DISPERSION */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw6            = _mm256_mul_pd(c6_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw6            = _mm256_mul_pd(c6_00,FF);
 +
 +            /* CUBIC SPLINE TABLE REPULSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw12           = _mm256_mul_pd(c12_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw12           = _mm256_mul_pd(c12_00,FF);
 +            vvdw             = _mm256_add_pd(vvdw12,vvdw6);
 +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            vvdw             = _mm256_andnot_pd(dummy_mask,vvdw);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = fvdw;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq10,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_add_pd(rinv10,_mm256_mul_pd(krf,rsq10)),crf));
 +            felec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_mul_pd(rinv10,rinvsq10),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq20,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_add_pd(rinv20,_mm256_mul_pd(krf,rsq20)),crf));
 +            felec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_mul_pd(rinv20,rinvsq20),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq30,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq30             = _mm256_mul_pd(iq3,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq30,_mm256_sub_pd(_mm256_add_pd(rinv30,_mm256_mul_pd(krf,rsq30)),crf));
 +            felec            = _mm256_mul_pd(qq30,_mm256_sub_pd(_mm256_mul_pd(rinv30,rinvsq30),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq30,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx30);
 +            ty               = _mm256_mul_pd(fscal,dy30);
 +            tz               = _mm256_mul_pd(fscal,dz30);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 168 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_4atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
 +        gmx_mm256_update_1pot_pd(vvdwsum,kernel_data->energygrp_vdw+ggid);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 26 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4_VF,outeriter*26 + inneriter*168);
 +}
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecRFCut_VdwCSTab_GeomW4P1_F_avx_256_double
 + * Electrostatics interaction: ReactionField
 + * VdW interaction:            CubicSplineTable
 + * Geometry:                   Water4-Particle
 + * Calculate force/pot:        Force
 + */
 +void
 +nb_kernel_ElecRFCut_VdwCSTab_GeomW4P1_F_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    real *           vdwioffsetptr3;
 +    __m256d          ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          dx30,dy30,dz30,rsq30,rinv30,rinvsq30,r30,qq30,c6_30,c12_30;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m128i          vfitab;
 +    __m128i          ifour       = _mm_set1_epi32(4);
 +    __m256d          rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
 +    real             *vftab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    krf              = _mm256_set1_pd(fr->ic->k_rf);
 +    krf2             = _mm256_set1_pd(fr->ic->k_rf*2.0);
 +    crf              = _mm256_set1_pd(fr->ic->c_rf);
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    vftab            = kernel_data->table_vdw->data;
 +    vftabscale       = _mm256_set1_pd(kernel_data->table_vdw->scale);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    iq3              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+3]));
 +    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +    /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
 +    rcutoff_scalar   = fr->rcoulomb;
 +    rcutoff          = _mm256_set1_pd(rcutoff_scalar);
 +    rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_4rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +        fix3             = _mm256_setzero_pd();
 +        fiy3             = _mm256_setzero_pd();
 +        fiz3             = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx30             = _mm256_sub_pd(ix3,jx0);
 +            dy30             = _mm256_sub_pd(iy3,jy0);
 +            dz30             = _mm256_sub_pd(iz3,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq30            = gmx_mm256_calc_rsq_pd(dx30,dy30,dz30);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv30           = gmx_mm256_invsqrt_pd(rsq30);
 +
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq30         = _mm256_mul_pd(rinv30,rinv30);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,3);
 +
 +            /* CUBIC SPLINE TABLE DISPERSION */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw6            = _mm256_mul_pd(c6_00,FF);
 +
 +            /* CUBIC SPLINE TABLE REPULSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw12           = _mm256_mul_pd(c12_00,FF);
 +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            fscal            = fvdw;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq10,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_mul_pd(rinv10,rinvsq10),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq20,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_mul_pd(rinv20,rinvsq20),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq30,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq30             = _mm256_mul_pd(iq3,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq30,_mm256_sub_pd(_mm256_mul_pd(rinv30,rinvsq30),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq30,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx30);
 +            ty               = _mm256_mul_pd(fscal,dy30);
 +            tz               = _mm256_mul_pd(fscal,dz30);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 141 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx30             = _mm256_sub_pd(ix3,jx0);
 +            dy30             = _mm256_sub_pd(iy3,jy0);
 +            dz30             = _mm256_sub_pd(iz3,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq30            = gmx_mm256_calc_rsq_pd(dx30,dy30,dz30);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv30           = gmx_mm256_invsqrt_pd(rsq30);
 +
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq30         = _mm256_mul_pd(rinv30,rinv30);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,3);
 +
 +            /* CUBIC SPLINE TABLE DISPERSION */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw6            = _mm256_mul_pd(c6_00,FF);
 +
 +            /* CUBIC SPLINE TABLE REPULSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw12           = _mm256_mul_pd(c12_00,FF);
 +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            fscal            = fvdw;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq10,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_mul_pd(rinv10,rinvsq10),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq20,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_mul_pd(rinv20,rinvsq20),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq30,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq30             = _mm256_mul_pd(iq3,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq30,_mm256_sub_pd(_mm256_mul_pd(rinv30,rinvsq30),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq30,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx30);
 +            ty               = _mm256_mul_pd(fscal,dy30);
 +            tz               = _mm256_mul_pd(fscal,dz30);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 142 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_4atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 24 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4_F,outeriter*24 + inneriter*142);
 +}
index e1345c5ec5914abb5bccedfd338dfc1c730662e1,0000000000000000000000000000000000000000..9d0f22f85a9002eaede57c20a0e02a6b1452508f
mode 100644,000000..100644
--- /dev/null
@@@ -1,2450 -1,0 +1,2450 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/*
 + * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2001-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the Free
 + * Software Foundation; either version 2 of the License, or (at your option) any
 + * later version.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website.
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecRFCut_VdwCSTab_GeomW4W4_VF_avx_256_double
 + * Electrostatics interaction: ReactionField
 + * VdW interaction:            CubicSplineTable
 + * Geometry:                   Water4-Water4
 + * Calculate force/pot:        PotentialAndForce
 + */
 +void
 +nb_kernel_ElecRFCut_VdwCSTab_GeomW4W4_VF_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    real *           vdwioffsetptr3;
 +    __m256d          ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
 +    __m256d          jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
 +    int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
 +    __m256d          jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
 +    int              vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D;
 +    __m256d          jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
 +    __m256d          dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
 +    __m256d          dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
 +    __m256d          dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
 +    __m256d          dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
 +    __m256d          dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
 +    __m256d          dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
 +    __m256d          dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
 +    __m256d          dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m128i          vfitab;
 +    __m128i          ifour       = _mm_set1_epi32(4);
 +    __m256d          rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
 +    real             *vftab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    krf              = _mm256_set1_pd(fr->ic->k_rf);
 +    krf2             = _mm256_set1_pd(fr->ic->k_rf*2.0);
 +    crf              = _mm256_set1_pd(fr->ic->c_rf);
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    vftab            = kernel_data->table_vdw->data;
 +    vftabscale       = _mm256_set1_pd(kernel_data->table_vdw->scale);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    iq3              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+3]));
 +    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +    jq1              = _mm256_set1_pd(charge[inr+1]);
 +    jq2              = _mm256_set1_pd(charge[inr+2]);
 +    jq3              = _mm256_set1_pd(charge[inr+3]);
 +    vdwjidx0A        = 2*vdwtype[inr+0];
 +    c6_00            = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A]);
 +    c12_00           = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A+1]);
 +    qq11             = _mm256_mul_pd(iq1,jq1);
 +    qq12             = _mm256_mul_pd(iq1,jq2);
 +    qq13             = _mm256_mul_pd(iq1,jq3);
 +    qq21             = _mm256_mul_pd(iq2,jq1);
 +    qq22             = _mm256_mul_pd(iq2,jq2);
 +    qq23             = _mm256_mul_pd(iq2,jq3);
 +    qq31             = _mm256_mul_pd(iq3,jq1);
 +    qq32             = _mm256_mul_pd(iq3,jq2);
 +    qq33             = _mm256_mul_pd(iq3,jq3);
 +
 +    /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
 +    rcutoff_scalar   = fr->rcoulomb;
 +    rcutoff          = _mm256_set1_pd(rcutoff_scalar);
 +    rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_4rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +        fix3             = _mm256_setzero_pd();
 +        fiy3             = _mm256_setzero_pd();
 +        fiz3             = _mm256_setzero_pd();
 +
 +        /* Reset potential sums */
 +        velecsum         = _mm256_setzero_pd();
 +        vvdwsum          = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_4rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
 +                                                 &jy2,&jz2,&jx3,&jy3,&jz3);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx13             = _mm256_sub_pd(ix1,jx3);
 +            dy13             = _mm256_sub_pd(iy1,jy3);
 +            dz13             = _mm256_sub_pd(iz1,jz3);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +            dx23             = _mm256_sub_pd(ix2,jx3);
 +            dy23             = _mm256_sub_pd(iy2,jy3);
 +            dz23             = _mm256_sub_pd(iz2,jz3);
 +            dx31             = _mm256_sub_pd(ix3,jx1);
 +            dy31             = _mm256_sub_pd(iy3,jy1);
 +            dz31             = _mm256_sub_pd(iz3,jz1);
 +            dx32             = _mm256_sub_pd(ix3,jx2);
 +            dy32             = _mm256_sub_pd(iy3,jy2);
 +            dz32             = _mm256_sub_pd(iz3,jz2);
 +            dx33             = _mm256_sub_pd(ix3,jx3);
 +            dy33             = _mm256_sub_pd(iy3,jy3);
 +            dz33             = _mm256_sub_pd(iz3,jz3);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq13            = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +            rsq23            = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
 +            rsq31            = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
 +            rsq32            = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
 +            rsq33            = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv13           = gmx_mm256_invsqrt_pd(rsq13);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +            rinv23           = gmx_mm256_invsqrt_pd(rsq23);
 +            rinv31           = gmx_mm256_invsqrt_pd(rsq31);
 +            rinv32           = gmx_mm256_invsqrt_pd(rsq32);
 +            rinv33           = gmx_mm256_invsqrt_pd(rsq33);
 +
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq13         = _mm256_mul_pd(rinv13,rinv13);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +            rinvsq23         = _mm256_mul_pd(rinv23,rinv23);
 +            rinvsq31         = _mm256_mul_pd(rinv31,rinv31);
 +            rinvsq32         = _mm256_mul_pd(rinv32,rinv32);
 +            rinvsq33         = _mm256_mul_pd(rinv33,rinv33);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +            fjx3             = _mm256_setzero_pd();
 +            fjy3             = _mm256_setzero_pd();
 +            fjz3             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,3);
 +
 +            /* CUBIC SPLINE TABLE DISPERSION */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw6            = _mm256_mul_pd(c6_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw6            = _mm256_mul_pd(c6_00,FF);
 +
 +            /* CUBIC SPLINE TABLE REPULSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw12           = _mm256_mul_pd(c12_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw12           = _mm256_mul_pd(c12_00,FF);
 +            vvdw             = _mm256_add_pd(vvdw12,vvdw6);
 +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            vvdw             = _mm256_and_pd(vvdw,cutoff_mask);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = fvdw;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq11,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_add_pd(rinv11,_mm256_mul_pd(krf,rsq11)),crf));
 +            felec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_mul_pd(rinv11,rinvsq11),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq12,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_add_pd(rinv12,_mm256_mul_pd(krf,rsq12)),crf));
 +            felec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_mul_pd(rinv12,rinvsq12),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq13,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq13,_mm256_sub_pd(_mm256_add_pd(rinv13,_mm256_mul_pd(krf,rsq13)),crf));
 +            felec            = _mm256_mul_pd(qq13,_mm256_sub_pd(_mm256_mul_pd(rinv13,rinvsq13),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq13,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx13);
 +            ty               = _mm256_mul_pd(fscal,dy13);
 +            tz               = _mm256_mul_pd(fscal,dz13);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq21,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_add_pd(rinv21,_mm256_mul_pd(krf,rsq21)),crf));
 +            felec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_mul_pd(rinv21,rinvsq21),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq22,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_add_pd(rinv22,_mm256_mul_pd(krf,rsq22)),crf));
 +            felec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_mul_pd(rinv22,rinvsq22),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq23,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq23,_mm256_sub_pd(_mm256_add_pd(rinv23,_mm256_mul_pd(krf,rsq23)),crf));
 +            felec            = _mm256_mul_pd(qq23,_mm256_sub_pd(_mm256_mul_pd(rinv23,rinvsq23),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq23,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx23);
 +            ty               = _mm256_mul_pd(fscal,dy23);
 +            tz               = _mm256_mul_pd(fscal,dz23);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq31,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq31,_mm256_sub_pd(_mm256_add_pd(rinv31,_mm256_mul_pd(krf,rsq31)),crf));
 +            felec            = _mm256_mul_pd(qq31,_mm256_sub_pd(_mm256_mul_pd(rinv31,rinvsq31),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq31,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx31);
 +            ty               = _mm256_mul_pd(fscal,dy31);
 +            tz               = _mm256_mul_pd(fscal,dz31);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq32,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq32,_mm256_sub_pd(_mm256_add_pd(rinv32,_mm256_mul_pd(krf,rsq32)),crf));
 +            felec            = _mm256_mul_pd(qq32,_mm256_sub_pd(_mm256_mul_pd(rinv32,rinvsq32),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq32,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx32);
 +            ty               = _mm256_mul_pd(fscal,dy32);
 +            tz               = _mm256_mul_pd(fscal,dz32);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq33,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq33,_mm256_sub_pd(_mm256_add_pd(rinv33,_mm256_mul_pd(krf,rsq33)),crf));
 +            felec            = _mm256_mul_pd(qq33,_mm256_sub_pd(_mm256_mul_pd(rinv33,rinvsq33),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq33,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx33);
 +            ty               = _mm256_mul_pd(fscal,dy33);
 +            tz               = _mm256_mul_pd(fscal,dz33);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            }
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_4rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
 +                                                      fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
 +
 +            /* Inner loop uses 387 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_4rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
 +                                                 &jy2,&jz2,&jx3,&jy3,&jz3);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx13             = _mm256_sub_pd(ix1,jx3);
 +            dy13             = _mm256_sub_pd(iy1,jy3);
 +            dz13             = _mm256_sub_pd(iz1,jz3);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +            dx23             = _mm256_sub_pd(ix2,jx3);
 +            dy23             = _mm256_sub_pd(iy2,jy3);
 +            dz23             = _mm256_sub_pd(iz2,jz3);
 +            dx31             = _mm256_sub_pd(ix3,jx1);
 +            dy31             = _mm256_sub_pd(iy3,jy1);
 +            dz31             = _mm256_sub_pd(iz3,jz1);
 +            dx32             = _mm256_sub_pd(ix3,jx2);
 +            dy32             = _mm256_sub_pd(iy3,jy2);
 +            dz32             = _mm256_sub_pd(iz3,jz2);
 +            dx33             = _mm256_sub_pd(ix3,jx3);
 +            dy33             = _mm256_sub_pd(iy3,jy3);
 +            dz33             = _mm256_sub_pd(iz3,jz3);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq13            = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +            rsq23            = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
 +            rsq31            = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
 +            rsq32            = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
 +            rsq33            = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv13           = gmx_mm256_invsqrt_pd(rsq13);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +            rinv23           = gmx_mm256_invsqrt_pd(rsq23);
 +            rinv31           = gmx_mm256_invsqrt_pd(rsq31);
 +            rinv32           = gmx_mm256_invsqrt_pd(rsq32);
 +            rinv33           = gmx_mm256_invsqrt_pd(rsq33);
 +
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq13         = _mm256_mul_pd(rinv13,rinv13);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +            rinvsq23         = _mm256_mul_pd(rinv23,rinv23);
 +            rinvsq31         = _mm256_mul_pd(rinv31,rinv31);
 +            rinvsq32         = _mm256_mul_pd(rinv32,rinv32);
 +            rinvsq33         = _mm256_mul_pd(rinv33,rinv33);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +            fjx3             = _mm256_setzero_pd();
 +            fjy3             = _mm256_setzero_pd();
 +            fjz3             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,3);
 +
 +            /* CUBIC SPLINE TABLE DISPERSION */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw6            = _mm256_mul_pd(c6_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw6            = _mm256_mul_pd(c6_00,FF);
 +
 +            /* CUBIC SPLINE TABLE REPULSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw12           = _mm256_mul_pd(c12_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw12           = _mm256_mul_pd(c12_00,FF);
 +            vvdw             = _mm256_add_pd(vvdw12,vvdw6);
 +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            vvdw             = _mm256_and_pd(vvdw,cutoff_mask);
 +            vvdw             = _mm256_andnot_pd(dummy_mask,vvdw);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = fvdw;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq11,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_add_pd(rinv11,_mm256_mul_pd(krf,rsq11)),crf));
 +            felec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_mul_pd(rinv11,rinvsq11),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq12,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_add_pd(rinv12,_mm256_mul_pd(krf,rsq12)),crf));
 +            felec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_mul_pd(rinv12,rinvsq12),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq13,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq13,_mm256_sub_pd(_mm256_add_pd(rinv13,_mm256_mul_pd(krf,rsq13)),crf));
 +            felec            = _mm256_mul_pd(qq13,_mm256_sub_pd(_mm256_mul_pd(rinv13,rinvsq13),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq13,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx13);
 +            ty               = _mm256_mul_pd(fscal,dy13);
 +            tz               = _mm256_mul_pd(fscal,dz13);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq21,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_add_pd(rinv21,_mm256_mul_pd(krf,rsq21)),crf));
 +            felec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_mul_pd(rinv21,rinvsq21),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq22,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_add_pd(rinv22,_mm256_mul_pd(krf,rsq22)),crf));
 +            felec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_mul_pd(rinv22,rinvsq22),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq23,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq23,_mm256_sub_pd(_mm256_add_pd(rinv23,_mm256_mul_pd(krf,rsq23)),crf));
 +            felec            = _mm256_mul_pd(qq23,_mm256_sub_pd(_mm256_mul_pd(rinv23,rinvsq23),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq23,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx23);
 +            ty               = _mm256_mul_pd(fscal,dy23);
 +            tz               = _mm256_mul_pd(fscal,dz23);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq31,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq31,_mm256_sub_pd(_mm256_add_pd(rinv31,_mm256_mul_pd(krf,rsq31)),crf));
 +            felec            = _mm256_mul_pd(qq31,_mm256_sub_pd(_mm256_mul_pd(rinv31,rinvsq31),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq31,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx31);
 +            ty               = _mm256_mul_pd(fscal,dy31);
 +            tz               = _mm256_mul_pd(fscal,dz31);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq32,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq32,_mm256_sub_pd(_mm256_add_pd(rinv32,_mm256_mul_pd(krf,rsq32)),crf));
 +            felec            = _mm256_mul_pd(qq32,_mm256_sub_pd(_mm256_mul_pd(rinv32,rinvsq32),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq32,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx32);
 +            ty               = _mm256_mul_pd(fscal,dy32);
 +            tz               = _mm256_mul_pd(fscal,dz32);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq33,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq33,_mm256_sub_pd(_mm256_add_pd(rinv33,_mm256_mul_pd(krf,rsq33)),crf));
 +            felec            = _mm256_mul_pd(qq33,_mm256_sub_pd(_mm256_mul_pd(rinv33,rinvsq33),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq33,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx33);
 +            ty               = _mm256_mul_pd(fscal,dy33);
 +            tz               = _mm256_mul_pd(fscal,dz33);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            }
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_4rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
 +                                                      fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
 +
 +            /* Inner loop uses 388 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_4atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
 +        gmx_mm256_update_1pot_pd(vvdwsum,kernel_data->energygrp_vdw+ggid);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 26 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4W4_VF,outeriter*26 + inneriter*388);
 +}
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecRFCut_VdwCSTab_GeomW4W4_F_avx_256_double
 + * Electrostatics interaction: ReactionField
 + * VdW interaction:            CubicSplineTable
 + * Geometry:                   Water4-Water4
 + * Calculate force/pot:        Force
 + */
 +void
 +nb_kernel_ElecRFCut_VdwCSTab_GeomW4W4_F_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    real *           vdwioffsetptr3;
 +    __m256d          ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
 +    __m256d          jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
 +    int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
 +    __m256d          jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
 +    int              vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D;
 +    __m256d          jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
 +    __m256d          dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
 +    __m256d          dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
 +    __m256d          dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
 +    __m256d          dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
 +    __m256d          dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
 +    __m256d          dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
 +    __m256d          dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
 +    __m256d          dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m128i          vfitab;
 +    __m128i          ifour       = _mm_set1_epi32(4);
 +    __m256d          rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
 +    real             *vftab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    krf              = _mm256_set1_pd(fr->ic->k_rf);
 +    krf2             = _mm256_set1_pd(fr->ic->k_rf*2.0);
 +    crf              = _mm256_set1_pd(fr->ic->c_rf);
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    vftab            = kernel_data->table_vdw->data;
 +    vftabscale       = _mm256_set1_pd(kernel_data->table_vdw->scale);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    iq3              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+3]));
 +    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +    jq1              = _mm256_set1_pd(charge[inr+1]);
 +    jq2              = _mm256_set1_pd(charge[inr+2]);
 +    jq3              = _mm256_set1_pd(charge[inr+3]);
 +    vdwjidx0A        = 2*vdwtype[inr+0];
 +    c6_00            = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A]);
 +    c12_00           = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A+1]);
 +    qq11             = _mm256_mul_pd(iq1,jq1);
 +    qq12             = _mm256_mul_pd(iq1,jq2);
 +    qq13             = _mm256_mul_pd(iq1,jq3);
 +    qq21             = _mm256_mul_pd(iq2,jq1);
 +    qq22             = _mm256_mul_pd(iq2,jq2);
 +    qq23             = _mm256_mul_pd(iq2,jq3);
 +    qq31             = _mm256_mul_pd(iq3,jq1);
 +    qq32             = _mm256_mul_pd(iq3,jq2);
 +    qq33             = _mm256_mul_pd(iq3,jq3);
 +
 +    /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
 +    rcutoff_scalar   = fr->rcoulomb;
 +    rcutoff          = _mm256_set1_pd(rcutoff_scalar);
 +    rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_4rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +        fix3             = _mm256_setzero_pd();
 +        fiy3             = _mm256_setzero_pd();
 +        fiz3             = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_4rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
 +                                                 &jy2,&jz2,&jx3,&jy3,&jz3);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx13             = _mm256_sub_pd(ix1,jx3);
 +            dy13             = _mm256_sub_pd(iy1,jy3);
 +            dz13             = _mm256_sub_pd(iz1,jz3);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +            dx23             = _mm256_sub_pd(ix2,jx3);
 +            dy23             = _mm256_sub_pd(iy2,jy3);
 +            dz23             = _mm256_sub_pd(iz2,jz3);
 +            dx31             = _mm256_sub_pd(ix3,jx1);
 +            dy31             = _mm256_sub_pd(iy3,jy1);
 +            dz31             = _mm256_sub_pd(iz3,jz1);
 +            dx32             = _mm256_sub_pd(ix3,jx2);
 +            dy32             = _mm256_sub_pd(iy3,jy2);
 +            dz32             = _mm256_sub_pd(iz3,jz2);
 +            dx33             = _mm256_sub_pd(ix3,jx3);
 +            dy33             = _mm256_sub_pd(iy3,jy3);
 +            dz33             = _mm256_sub_pd(iz3,jz3);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq13            = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +            rsq23            = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
 +            rsq31            = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
 +            rsq32            = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
 +            rsq33            = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv13           = gmx_mm256_invsqrt_pd(rsq13);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +            rinv23           = gmx_mm256_invsqrt_pd(rsq23);
 +            rinv31           = gmx_mm256_invsqrt_pd(rsq31);
 +            rinv32           = gmx_mm256_invsqrt_pd(rsq32);
 +            rinv33           = gmx_mm256_invsqrt_pd(rsq33);
 +
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq13         = _mm256_mul_pd(rinv13,rinv13);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +            rinvsq23         = _mm256_mul_pd(rinv23,rinv23);
 +            rinvsq31         = _mm256_mul_pd(rinv31,rinv31);
 +            rinvsq32         = _mm256_mul_pd(rinv32,rinv32);
 +            rinvsq33         = _mm256_mul_pd(rinv33,rinv33);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +            fjx3             = _mm256_setzero_pd();
 +            fjy3             = _mm256_setzero_pd();
 +            fjz3             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,3);
 +
 +            /* CUBIC SPLINE TABLE DISPERSION */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw6            = _mm256_mul_pd(c6_00,FF);
 +
 +            /* CUBIC SPLINE TABLE REPULSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw12           = _mm256_mul_pd(c12_00,FF);
 +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = fvdw;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq11,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_mul_pd(rinv11,rinvsq11),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq12,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_mul_pd(rinv12,rinvsq12),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq13,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq13,_mm256_sub_pd(_mm256_mul_pd(rinv13,rinvsq13),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq13,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx13);
 +            ty               = _mm256_mul_pd(fscal,dy13);
 +            tz               = _mm256_mul_pd(fscal,dz13);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq21,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_mul_pd(rinv21,rinvsq21),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq22,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_mul_pd(rinv22,rinvsq22),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq23,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq23,_mm256_sub_pd(_mm256_mul_pd(rinv23,rinvsq23),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq23,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx23);
 +            ty               = _mm256_mul_pd(fscal,dy23);
 +            tz               = _mm256_mul_pd(fscal,dz23);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq31,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq31,_mm256_sub_pd(_mm256_mul_pd(rinv31,rinvsq31),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq31,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx31);
 +            ty               = _mm256_mul_pd(fscal,dy31);
 +            tz               = _mm256_mul_pd(fscal,dz31);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq32,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq32,_mm256_sub_pd(_mm256_mul_pd(rinv32,rinvsq32),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq32,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx32);
 +            ty               = _mm256_mul_pd(fscal,dy32);
 +            tz               = _mm256_mul_pd(fscal,dz32);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq33,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq33,_mm256_sub_pd(_mm256_mul_pd(rinv33,rinvsq33),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq33,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx33);
 +            ty               = _mm256_mul_pd(fscal,dy33);
 +            tz               = _mm256_mul_pd(fscal,dz33);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            }
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_4rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
 +                                                      fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
 +
 +            /* Inner loop uses 324 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_4rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
 +                                                 &jy2,&jz2,&jx3,&jy3,&jz3);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx13             = _mm256_sub_pd(ix1,jx3);
 +            dy13             = _mm256_sub_pd(iy1,jy3);
 +            dz13             = _mm256_sub_pd(iz1,jz3);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +            dx23             = _mm256_sub_pd(ix2,jx3);
 +            dy23             = _mm256_sub_pd(iy2,jy3);
 +            dz23             = _mm256_sub_pd(iz2,jz3);
 +            dx31             = _mm256_sub_pd(ix3,jx1);
 +            dy31             = _mm256_sub_pd(iy3,jy1);
 +            dz31             = _mm256_sub_pd(iz3,jz1);
 +            dx32             = _mm256_sub_pd(ix3,jx2);
 +            dy32             = _mm256_sub_pd(iy3,jy2);
 +            dz32             = _mm256_sub_pd(iz3,jz2);
 +            dx33             = _mm256_sub_pd(ix3,jx3);
 +            dy33             = _mm256_sub_pd(iy3,jy3);
 +            dz33             = _mm256_sub_pd(iz3,jz3);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq13            = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +            rsq23            = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
 +            rsq31            = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
 +            rsq32            = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
 +            rsq33            = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv13           = gmx_mm256_invsqrt_pd(rsq13);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +            rinv23           = gmx_mm256_invsqrt_pd(rsq23);
 +            rinv31           = gmx_mm256_invsqrt_pd(rsq31);
 +            rinv32           = gmx_mm256_invsqrt_pd(rsq32);
 +            rinv33           = gmx_mm256_invsqrt_pd(rsq33);
 +
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq13         = _mm256_mul_pd(rinv13,rinv13);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +            rinvsq23         = _mm256_mul_pd(rinv23,rinv23);
 +            rinvsq31         = _mm256_mul_pd(rinv31,rinv31);
 +            rinvsq32         = _mm256_mul_pd(rinv32,rinv32);
 +            rinvsq33         = _mm256_mul_pd(rinv33,rinv33);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +            fjx3             = _mm256_setzero_pd();
 +            fjy3             = _mm256_setzero_pd();
 +            fjz3             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,3);
 +
 +            /* CUBIC SPLINE TABLE DISPERSION */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw6            = _mm256_mul_pd(c6_00,FF);
 +
 +            /* CUBIC SPLINE TABLE REPULSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw12           = _mm256_mul_pd(c12_00,FF);
 +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = fvdw;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq11,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_mul_pd(rinv11,rinvsq11),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq12,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_mul_pd(rinv12,rinvsq12),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq13,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq13,_mm256_sub_pd(_mm256_mul_pd(rinv13,rinvsq13),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq13,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx13);
 +            ty               = _mm256_mul_pd(fscal,dy13);
 +            tz               = _mm256_mul_pd(fscal,dz13);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq21,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_mul_pd(rinv21,rinvsq21),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq22,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_mul_pd(rinv22,rinvsq22),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq23,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq23,_mm256_sub_pd(_mm256_mul_pd(rinv23,rinvsq23),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq23,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx23);
 +            ty               = _mm256_mul_pd(fscal,dy23);
 +            tz               = _mm256_mul_pd(fscal,dz23);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq31,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq31,_mm256_sub_pd(_mm256_mul_pd(rinv31,rinvsq31),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq31,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx31);
 +            ty               = _mm256_mul_pd(fscal,dy31);
 +            tz               = _mm256_mul_pd(fscal,dz31);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq32,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq32,_mm256_sub_pd(_mm256_mul_pd(rinv32,rinvsq32),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq32,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx32);
 +            ty               = _mm256_mul_pd(fscal,dy32);
 +            tz               = _mm256_mul_pd(fscal,dz32);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq33,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq33,_mm256_sub_pd(_mm256_mul_pd(rinv33,rinvsq33),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq33,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx33);
 +            ty               = _mm256_mul_pd(fscal,dy33);
 +            tz               = _mm256_mul_pd(fscal,dz33);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            }
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_4rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
 +                                                      fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
 +
 +            /* Inner loop uses 325 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_4atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 24 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4W4_F,outeriter*24 + inneriter*325);
 +}
index 2c4c3457cd9f77edc07b075084ec4f7286e03573,0000000000000000000000000000000000000000..5cc28cea2f8dcd0662595091c964c808b8237af8
mode 100644,000000..100644
--- /dev/null
@@@ -1,730 -1,0 +1,730 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/*
 + * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2001-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the Free
 + * Software Foundation; either version 2 of the License, or (at your option) any
 + * later version.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website.
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecRFCut_VdwLJSh_GeomP1P1_VF_avx_256_double
 + * Electrostatics interaction: ReactionField
 + * VdW interaction:            LennardJones
 + * Geometry:                   Particle-Particle
 + * Calculate force/pot:        PotentialAndForce
 + */
 +void
 +nb_kernel_ElecRFCut_VdwLJSh_GeomP1P1_VF_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    krf              = _mm256_set1_pd(fr->ic->k_rf);
 +    krf2             = _mm256_set1_pd(fr->ic->k_rf*2.0);
 +    crf              = _mm256_set1_pd(fr->ic->c_rf);
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
 +    rcutoff_scalar   = fr->rcoulomb;
 +    rcutoff          = _mm256_set1_pd(rcutoff_scalar);
 +    rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);
 +
 +    sh_vdw_invrcut6  = _mm256_set1_pd(fr->ic->sh_invrc6);
 +    rvdw             = _mm256_set1_pd(fr->rvdw);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_1rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +
 +        /* Load parameters for i particles */
 +        iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +        vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +        /* Reset potential sums */
 +        velecsum         = _mm256_setzero_pd();
 +        vvdwsum          = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_add_pd(rinv00,_mm256_mul_pd(krf,rsq00)),crf));
 +            felec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_mul_pd(rinv00,rinvsq00),krf2));
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd(_mm256_mul_pd( _mm256_sub_pd(vvdw12 , _mm256_mul_pd(c12_00,_mm256_mul_pd(sh_vdw_invrcut6,sh_vdw_invrcut6))), one_twelfth) ,
 +                                          _mm256_mul_pd( _mm256_sub_pd(vvdw6,_mm256_mul_pd(c6_00,sh_vdw_invrcut6)),one_sixth));
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +            vvdw             = _mm256_and_pd(vvdw,cutoff_mask);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
 +
 +            }
 +
 +            /* Inner loop uses 54 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_add_pd(rinv00,_mm256_mul_pd(krf,rsq00)),crf));
 +            felec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_mul_pd(rinv00,rinvsq00),krf2));
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd(_mm256_mul_pd( _mm256_sub_pd(vvdw12 , _mm256_mul_pd(c12_00,_mm256_mul_pd(sh_vdw_invrcut6,sh_vdw_invrcut6))), one_twelfth) ,
 +                                          _mm256_mul_pd( _mm256_sub_pd(vvdw6,_mm256_mul_pd(c6_00,sh_vdw_invrcut6)),one_sixth));
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +            vvdw             = _mm256_and_pd(vvdw,cutoff_mask);
 +            vvdw             = _mm256_andnot_pd(dummy_mask,vvdw);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
 +
 +            }
 +
 +            /* Inner loop uses 54 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_1atom_swizzle_pd(fix0,fiy0,fiz0,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
 +        gmx_mm256_update_1pot_pd(vvdwsum,kernel_data->energygrp_vdw+ggid);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 9 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_VF,outeriter*9 + inneriter*54);
 +}
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecRFCut_VdwLJSh_GeomP1P1_F_avx_256_double
 + * Electrostatics interaction: ReactionField
 + * VdW interaction:            LennardJones
 + * Geometry:                   Particle-Particle
 + * Calculate force/pot:        Force
 + */
 +void
 +nb_kernel_ElecRFCut_VdwLJSh_GeomP1P1_F_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    krf              = _mm256_set1_pd(fr->ic->k_rf);
 +    krf2             = _mm256_set1_pd(fr->ic->k_rf*2.0);
 +    crf              = _mm256_set1_pd(fr->ic->c_rf);
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
 +    rcutoff_scalar   = fr->rcoulomb;
 +    rcutoff          = _mm256_set1_pd(rcutoff_scalar);
 +    rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);
 +
 +    sh_vdw_invrcut6  = _mm256_set1_pd(fr->ic->sh_invrc6);
 +    rvdw             = _mm256_set1_pd(fr->rvdw);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_1rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +
 +        /* Load parameters for i particles */
 +        iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +        vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_mul_pd(rinv00,rinvsq00),krf2));
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(_mm256_mul_pd(c12_00,rinvsix),c6_00),_mm256_mul_pd(rinvsix,rinvsq00));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
 +
 +            }
 +
 +            /* Inner loop uses 37 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_mul_pd(rinv00,rinvsq00),krf2));
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(_mm256_mul_pd(c12_00,rinvsix),c6_00),_mm256_mul_pd(rinvsix,rinvsq00));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
 +
 +            }
 +
 +            /* Inner loop uses 37 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_1atom_swizzle_pd(fix0,fiy0,fiz0,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 7 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_F,outeriter*7 + inneriter*37);
 +}
index efc7adbdb0e0563e32b46aa8dc2fc376f86817c7,0000000000000000000000000000000000000000..00bd89dd476c1a9a0fe47426c06969513e770598
mode 100644,000000..100644
--- /dev/null
@@@ -1,1156 -1,0 +1,1156 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/*
 + * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2001-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the Free
 + * Software Foundation; either version 2 of the License, or (at your option) any
 + * later version.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website.
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecRFCut_VdwLJSh_GeomW3P1_VF_avx_256_double
 + * Electrostatics interaction: ReactionField
 + * VdW interaction:            LennardJones
 + * Geometry:                   Water3-Particle
 + * Calculate force/pot:        PotentialAndForce
 + */
 +void
 +nb_kernel_ElecRFCut_VdwLJSh_GeomW3P1_VF_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    krf              = _mm256_set1_pd(fr->ic->k_rf);
 +    krf2             = _mm256_set1_pd(fr->ic->k_rf*2.0);
 +    crf              = _mm256_set1_pd(fr->ic->c_rf);
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +    /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
 +    rcutoff_scalar   = fr->rcoulomb;
 +    rcutoff          = _mm256_set1_pd(rcutoff_scalar);
 +    rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);
 +
 +    sh_vdw_invrcut6  = _mm256_set1_pd(fr->ic->sh_invrc6);
 +    rvdw             = _mm256_set1_pd(fr->rvdw);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +
 +        /* Reset potential sums */
 +        velecsum         = _mm256_setzero_pd();
 +        vvdwsum          = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_add_pd(rinv00,_mm256_mul_pd(krf,rsq00)),crf));
 +            felec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_mul_pd(rinv00,rinvsq00),krf2));
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd(_mm256_mul_pd( _mm256_sub_pd(vvdw12 , _mm256_mul_pd(c12_00,_mm256_mul_pd(sh_vdw_invrcut6,sh_vdw_invrcut6))), one_twelfth) ,
 +                                          _mm256_mul_pd( _mm256_sub_pd(vvdw6,_mm256_mul_pd(c6_00,sh_vdw_invrcut6)),one_sixth));
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +            vvdw             = _mm256_and_pd(vvdw,cutoff_mask);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq10,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_add_pd(rinv10,_mm256_mul_pd(krf,rsq10)),crf));
 +            felec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_mul_pd(rinv10,rinvsq10),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq20,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_add_pd(rinv20,_mm256_mul_pd(krf,rsq20)),crf));
 +            felec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_mul_pd(rinv20,rinvsq20),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 129 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_add_pd(rinv00,_mm256_mul_pd(krf,rsq00)),crf));
 +            felec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_mul_pd(rinv00,rinvsq00),krf2));
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd(_mm256_mul_pd( _mm256_sub_pd(vvdw12 , _mm256_mul_pd(c12_00,_mm256_mul_pd(sh_vdw_invrcut6,sh_vdw_invrcut6))), one_twelfth) ,
 +                                          _mm256_mul_pd( _mm256_sub_pd(vvdw6,_mm256_mul_pd(c6_00,sh_vdw_invrcut6)),one_sixth));
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +            vvdw             = _mm256_and_pd(vvdw,cutoff_mask);
 +            vvdw             = _mm256_andnot_pd(dummy_mask,vvdw);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq10,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_add_pd(rinv10,_mm256_mul_pd(krf,rsq10)),crf));
 +            felec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_mul_pd(rinv10,rinvsq10),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq20,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_add_pd(rinv20,_mm256_mul_pd(krf,rsq20)),crf));
 +            felec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_mul_pd(rinv20,rinvsq20),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 129 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
 +        gmx_mm256_update_1pot_pd(vvdwsum,kernel_data->energygrp_vdw+ggid);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 20 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3_VF,outeriter*20 + inneriter*129);
 +}
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecRFCut_VdwLJSh_GeomW3P1_F_avx_256_double
 + * Electrostatics interaction: ReactionField
 + * VdW interaction:            LennardJones
 + * Geometry:                   Water3-Particle
 + * Calculate force/pot:        Force
 + */
 +void
 +nb_kernel_ElecRFCut_VdwLJSh_GeomW3P1_F_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    krf              = _mm256_set1_pd(fr->ic->k_rf);
 +    krf2             = _mm256_set1_pd(fr->ic->k_rf*2.0);
 +    crf              = _mm256_set1_pd(fr->ic->c_rf);
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +    /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
 +    rcutoff_scalar   = fr->rcoulomb;
 +    rcutoff          = _mm256_set1_pd(rcutoff_scalar);
 +    rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);
 +
 +    sh_vdw_invrcut6  = _mm256_set1_pd(fr->ic->sh_invrc6);
 +    rvdw             = _mm256_set1_pd(fr->rvdw);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_mul_pd(rinv00,rinvsq00),krf2));
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(_mm256_mul_pd(c12_00,rinvsix),c6_00),_mm256_mul_pd(rinvsix,rinvsq00));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq10,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_mul_pd(rinv10,rinvsq10),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq20,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_mul_pd(rinv20,rinvsq20),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 100 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_mul_pd(rinv00,rinvsq00),krf2));
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(_mm256_mul_pd(c12_00,rinvsix),c6_00),_mm256_mul_pd(rinvsix,rinvsq00));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq10,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_mul_pd(rinv10,rinvsq10),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq20,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_mul_pd(rinv20,rinvsq20),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 100 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 18 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3_F,outeriter*18 + inneriter*100);
 +}
index 7f5ba1dd765fcff230ddca16ed29058090847f25,0000000000000000000000000000000000000000..9a38a58cf7b561c28dbe6cd15f7d97f5acde347e
mode 100644,000000..100644
--- /dev/null
@@@ -1,2150 -1,0 +1,2150 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/*
 + * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2001-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the Free
 + * Software Foundation; either version 2 of the License, or (at your option) any
 + * later version.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website.
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecRFCut_VdwLJSh_GeomW3W3_VF_avx_256_double
 + * Electrostatics interaction: ReactionField
 + * VdW interaction:            LennardJones
 + * Geometry:                   Water3-Water3
 + * Calculate force/pot:        PotentialAndForce
 + */
 +void
 +nb_kernel_ElecRFCut_VdwLJSh_GeomW3W3_VF_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
 +    __m256d          jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
 +    int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
 +    __m256d          jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
 +    __m256d          dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
 +    __m256d          dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
 +    __m256d          dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    krf              = _mm256_set1_pd(fr->ic->k_rf);
 +    krf2             = _mm256_set1_pd(fr->ic->k_rf*2.0);
 +    crf              = _mm256_set1_pd(fr->ic->c_rf);
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +    jq0              = _mm256_set1_pd(charge[inr+0]);
 +    jq1              = _mm256_set1_pd(charge[inr+1]);
 +    jq2              = _mm256_set1_pd(charge[inr+2]);
 +    vdwjidx0A        = 2*vdwtype[inr+0];
 +    qq00             = _mm256_mul_pd(iq0,jq0);
 +    c6_00            = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A]);
 +    c12_00           = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A+1]);
 +    qq01             = _mm256_mul_pd(iq0,jq1);
 +    qq02             = _mm256_mul_pd(iq0,jq2);
 +    qq10             = _mm256_mul_pd(iq1,jq0);
 +    qq11             = _mm256_mul_pd(iq1,jq1);
 +    qq12             = _mm256_mul_pd(iq1,jq2);
 +    qq20             = _mm256_mul_pd(iq2,jq0);
 +    qq21             = _mm256_mul_pd(iq2,jq1);
 +    qq22             = _mm256_mul_pd(iq2,jq2);
 +
 +    /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
 +    rcutoff_scalar   = fr->rcoulomb;
 +    rcutoff          = _mm256_set1_pd(rcutoff_scalar);
 +    rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);
 +
 +    sh_vdw_invrcut6  = _mm256_set1_pd(fr->ic->sh_invrc6);
 +    rvdw             = _mm256_set1_pd(fr->rvdw);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +
 +        /* Reset potential sums */
 +        velecsum         = _mm256_setzero_pd();
 +        vvdwsum          = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                              &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx01             = _mm256_sub_pd(ix0,jx1);
 +            dy01             = _mm256_sub_pd(iy0,jy1);
 +            dz01             = _mm256_sub_pd(iz0,jz1);
 +            dx02             = _mm256_sub_pd(ix0,jx2);
 +            dy02             = _mm256_sub_pd(iy0,jy2);
 +            dz02             = _mm256_sub_pd(iz0,jz2);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
 +            rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv01           = gmx_mm256_invsqrt_pd(rsq01);
 +            rinv02           = gmx_mm256_invsqrt_pd(rsq02);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq01         = _mm256_mul_pd(rinv01,rinv01);
 +            rinvsq02         = _mm256_mul_pd(rinv02,rinv02);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_add_pd(rinv00,_mm256_mul_pd(krf,rsq00)),crf));
 +            felec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_mul_pd(rinv00,rinvsq00),krf2));
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd(_mm256_mul_pd( _mm256_sub_pd(vvdw12 , _mm256_mul_pd(c12_00,_mm256_mul_pd(sh_vdw_invrcut6,sh_vdw_invrcut6))), one_twelfth) ,
 +                                          _mm256_mul_pd( _mm256_sub_pd(vvdw6,_mm256_mul_pd(c6_00,sh_vdw_invrcut6)),one_sixth));
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +            vvdw             = _mm256_and_pd(vvdw,cutoff_mask);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq01,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq01,_mm256_sub_pd(_mm256_add_pd(rinv01,_mm256_mul_pd(krf,rsq01)),crf));
 +            felec            = _mm256_mul_pd(qq01,_mm256_sub_pd(_mm256_mul_pd(rinv01,rinvsq01),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq01,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx01);
 +            ty               = _mm256_mul_pd(fscal,dy01);
 +            tz               = _mm256_mul_pd(fscal,dz01);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq02,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq02,_mm256_sub_pd(_mm256_add_pd(rinv02,_mm256_mul_pd(krf,rsq02)),crf));
 +            felec            = _mm256_mul_pd(qq02,_mm256_sub_pd(_mm256_mul_pd(rinv02,rinvsq02),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq02,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx02);
 +            ty               = _mm256_mul_pd(fscal,dy02);
 +            tz               = _mm256_mul_pd(fscal,dz02);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq10,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_add_pd(rinv10,_mm256_mul_pd(krf,rsq10)),crf));
 +            felec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_mul_pd(rinv10,rinvsq10),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq11,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_add_pd(rinv11,_mm256_mul_pd(krf,rsq11)),crf));
 +            felec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_mul_pd(rinv11,rinvsq11),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq12,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_add_pd(rinv12,_mm256_mul_pd(krf,rsq12)),crf));
 +            felec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_mul_pd(rinv12,rinvsq12),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq20,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_add_pd(rinv20,_mm256_mul_pd(krf,rsq20)),crf));
 +            felec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_mul_pd(rinv20,rinvsq20),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq21,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_add_pd(rinv21,_mm256_mul_pd(krf,rsq21)),crf));
 +            felec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_mul_pd(rinv21,rinvsq21),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq22,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_add_pd(rinv22,_mm256_mul_pd(krf,rsq22)),crf));
 +            felec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_mul_pd(rinv22,rinvsq22),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
 +
 +            /* Inner loop uses 342 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                              &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx01             = _mm256_sub_pd(ix0,jx1);
 +            dy01             = _mm256_sub_pd(iy0,jy1);
 +            dz01             = _mm256_sub_pd(iz0,jz1);
 +            dx02             = _mm256_sub_pd(ix0,jx2);
 +            dy02             = _mm256_sub_pd(iy0,jy2);
 +            dz02             = _mm256_sub_pd(iz0,jz2);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
 +            rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv01           = gmx_mm256_invsqrt_pd(rsq01);
 +            rinv02           = gmx_mm256_invsqrt_pd(rsq02);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq01         = _mm256_mul_pd(rinv01,rinv01);
 +            rinvsq02         = _mm256_mul_pd(rinv02,rinv02);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_add_pd(rinv00,_mm256_mul_pd(krf,rsq00)),crf));
 +            felec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_mul_pd(rinv00,rinvsq00),krf2));
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd(_mm256_mul_pd( _mm256_sub_pd(vvdw12 , _mm256_mul_pd(c12_00,_mm256_mul_pd(sh_vdw_invrcut6,sh_vdw_invrcut6))), one_twelfth) ,
 +                                          _mm256_mul_pd( _mm256_sub_pd(vvdw6,_mm256_mul_pd(c6_00,sh_vdw_invrcut6)),one_sixth));
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +            vvdw             = _mm256_and_pd(vvdw,cutoff_mask);
 +            vvdw             = _mm256_andnot_pd(dummy_mask,vvdw);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq01,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq01,_mm256_sub_pd(_mm256_add_pd(rinv01,_mm256_mul_pd(krf,rsq01)),crf));
 +            felec            = _mm256_mul_pd(qq01,_mm256_sub_pd(_mm256_mul_pd(rinv01,rinvsq01),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq01,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx01);
 +            ty               = _mm256_mul_pd(fscal,dy01);
 +            tz               = _mm256_mul_pd(fscal,dz01);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq02,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq02,_mm256_sub_pd(_mm256_add_pd(rinv02,_mm256_mul_pd(krf,rsq02)),crf));
 +            felec            = _mm256_mul_pd(qq02,_mm256_sub_pd(_mm256_mul_pd(rinv02,rinvsq02),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq02,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx02);
 +            ty               = _mm256_mul_pd(fscal,dy02);
 +            tz               = _mm256_mul_pd(fscal,dz02);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq10,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_add_pd(rinv10,_mm256_mul_pd(krf,rsq10)),crf));
 +            felec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_mul_pd(rinv10,rinvsq10),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq11,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_add_pd(rinv11,_mm256_mul_pd(krf,rsq11)),crf));
 +            felec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_mul_pd(rinv11,rinvsq11),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq12,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_add_pd(rinv12,_mm256_mul_pd(krf,rsq12)),crf));
 +            felec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_mul_pd(rinv12,rinvsq12),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq20,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_add_pd(rinv20,_mm256_mul_pd(krf,rsq20)),crf));
 +            felec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_mul_pd(rinv20,rinvsq20),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq21,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_add_pd(rinv21,_mm256_mul_pd(krf,rsq21)),crf));
 +            felec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_mul_pd(rinv21,rinvsq21),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq22,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_add_pd(rinv22,_mm256_mul_pd(krf,rsq22)),crf));
 +            felec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_mul_pd(rinv22,rinvsq22),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
 +
 +            /* Inner loop uses 342 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
 +        gmx_mm256_update_1pot_pd(vvdwsum,kernel_data->energygrp_vdw+ggid);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 20 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3W3_VF,outeriter*20 + inneriter*342);
 +}
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecRFCut_VdwLJSh_GeomW3W3_F_avx_256_double
 + * Electrostatics interaction: ReactionField
 + * VdW interaction:            LennardJones
 + * Geometry:                   Water3-Water3
 + * Calculate force/pot:        Force
 + */
 +void
 +nb_kernel_ElecRFCut_VdwLJSh_GeomW3W3_F_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
 +    __m256d          jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
 +    int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
 +    __m256d          jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
 +    __m256d          dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
 +    __m256d          dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
 +    __m256d          dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    krf              = _mm256_set1_pd(fr->ic->k_rf);
 +    krf2             = _mm256_set1_pd(fr->ic->k_rf*2.0);
 +    crf              = _mm256_set1_pd(fr->ic->c_rf);
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +    jq0              = _mm256_set1_pd(charge[inr+0]);
 +    jq1              = _mm256_set1_pd(charge[inr+1]);
 +    jq2              = _mm256_set1_pd(charge[inr+2]);
 +    vdwjidx0A        = 2*vdwtype[inr+0];
 +    qq00             = _mm256_mul_pd(iq0,jq0);
 +    c6_00            = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A]);
 +    c12_00           = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A+1]);
 +    qq01             = _mm256_mul_pd(iq0,jq1);
 +    qq02             = _mm256_mul_pd(iq0,jq2);
 +    qq10             = _mm256_mul_pd(iq1,jq0);
 +    qq11             = _mm256_mul_pd(iq1,jq1);
 +    qq12             = _mm256_mul_pd(iq1,jq2);
 +    qq20             = _mm256_mul_pd(iq2,jq0);
 +    qq21             = _mm256_mul_pd(iq2,jq1);
 +    qq22             = _mm256_mul_pd(iq2,jq2);
 +
 +    /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
 +    rcutoff_scalar   = fr->rcoulomb;
 +    rcutoff          = _mm256_set1_pd(rcutoff_scalar);
 +    rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);
 +
 +    sh_vdw_invrcut6  = _mm256_set1_pd(fr->ic->sh_invrc6);
 +    rvdw             = _mm256_set1_pd(fr->rvdw);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                              &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx01             = _mm256_sub_pd(ix0,jx1);
 +            dy01             = _mm256_sub_pd(iy0,jy1);
 +            dz01             = _mm256_sub_pd(iz0,jz1);
 +            dx02             = _mm256_sub_pd(ix0,jx2);
 +            dy02             = _mm256_sub_pd(iy0,jy2);
 +            dz02             = _mm256_sub_pd(iz0,jz2);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
 +            rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv01           = gmx_mm256_invsqrt_pd(rsq01);
 +            rinv02           = gmx_mm256_invsqrt_pd(rsq02);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq01         = _mm256_mul_pd(rinv01,rinv01);
 +            rinvsq02         = _mm256_mul_pd(rinv02,rinv02);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_mul_pd(rinv00,rinvsq00),krf2));
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(_mm256_mul_pd(c12_00,rinvsix),c6_00),_mm256_mul_pd(rinvsix,rinvsq00));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq01,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq01,_mm256_sub_pd(_mm256_mul_pd(rinv01,rinvsq01),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq01,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx01);
 +            ty               = _mm256_mul_pd(fscal,dy01);
 +            tz               = _mm256_mul_pd(fscal,dz01);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq02,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq02,_mm256_sub_pd(_mm256_mul_pd(rinv02,rinvsq02),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq02,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx02);
 +            ty               = _mm256_mul_pd(fscal,dy02);
 +            tz               = _mm256_mul_pd(fscal,dz02);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq10,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_mul_pd(rinv10,rinvsq10),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq11,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_mul_pd(rinv11,rinvsq11),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq12,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_mul_pd(rinv12,rinvsq12),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq20,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_mul_pd(rinv20,rinvsq20),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq21,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_mul_pd(rinv21,rinvsq21),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq22,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_mul_pd(rinv22,rinvsq22),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
 +
 +            /* Inner loop uses 277 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                              &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx01             = _mm256_sub_pd(ix0,jx1);
 +            dy01             = _mm256_sub_pd(iy0,jy1);
 +            dz01             = _mm256_sub_pd(iz0,jz1);
 +            dx02             = _mm256_sub_pd(ix0,jx2);
 +            dy02             = _mm256_sub_pd(iy0,jy2);
 +            dz02             = _mm256_sub_pd(iz0,jz2);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
 +            rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv01           = gmx_mm256_invsqrt_pd(rsq01);
 +            rinv02           = gmx_mm256_invsqrt_pd(rsq02);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq01         = _mm256_mul_pd(rinv01,rinv01);
 +            rinvsq02         = _mm256_mul_pd(rinv02,rinv02);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_mul_pd(rinv00,rinvsq00),krf2));
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(_mm256_mul_pd(c12_00,rinvsix),c6_00),_mm256_mul_pd(rinvsix,rinvsq00));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq01,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq01,_mm256_sub_pd(_mm256_mul_pd(rinv01,rinvsq01),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq01,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx01);
 +            ty               = _mm256_mul_pd(fscal,dy01);
 +            tz               = _mm256_mul_pd(fscal,dz01);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq02,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq02,_mm256_sub_pd(_mm256_mul_pd(rinv02,rinvsq02),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq02,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx02);
 +            ty               = _mm256_mul_pd(fscal,dy02);
 +            tz               = _mm256_mul_pd(fscal,dz02);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq10,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_mul_pd(rinv10,rinvsq10),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq11,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_mul_pd(rinv11,rinvsq11),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq12,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_mul_pd(rinv12,rinvsq12),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq20,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_mul_pd(rinv20,rinvsq20),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq21,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_mul_pd(rinv21,rinvsq21),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq22,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_mul_pd(rinv22,rinvsq22),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
 +
 +            /* Inner loop uses 277 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 18 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3W3_F,outeriter*18 + inneriter*277);
 +}
index 61320ae427bf37165c83d2471b12941ac705929a,0000000000000000000000000000000000000000..bae5ae38d38287a18a109dc6d24d99e2d286a26e
mode 100644,000000..100644
--- /dev/null
@@@ -1,1320 -1,0 +1,1320 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/*
 + * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2001-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the Free
 + * Software Foundation; either version 2 of the License, or (at your option) any
 + * later version.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website.
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecRFCut_VdwLJSh_GeomW4P1_VF_avx_256_double
 + * Electrostatics interaction: ReactionField
 + * VdW interaction:            LennardJones
 + * Geometry:                   Water4-Particle
 + * Calculate force/pot:        PotentialAndForce
 + */
 +void
 +nb_kernel_ElecRFCut_VdwLJSh_GeomW4P1_VF_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    real *           vdwioffsetptr3;
 +    __m256d          ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          dx30,dy30,dz30,rsq30,rinv30,rinvsq30,r30,qq30,c6_30,c12_30;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    krf              = _mm256_set1_pd(fr->ic->k_rf);
 +    krf2             = _mm256_set1_pd(fr->ic->k_rf*2.0);
 +    crf              = _mm256_set1_pd(fr->ic->c_rf);
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    iq3              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+3]));
 +    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +    /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
 +    rcutoff_scalar   = fr->rcoulomb;
 +    rcutoff          = _mm256_set1_pd(rcutoff_scalar);
 +    rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);
 +
 +    sh_vdw_invrcut6  = _mm256_set1_pd(fr->ic->sh_invrc6);
 +    rvdw             = _mm256_set1_pd(fr->rvdw);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_4rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +        fix3             = _mm256_setzero_pd();
 +        fiy3             = _mm256_setzero_pd();
 +        fiz3             = _mm256_setzero_pd();
 +
 +        /* Reset potential sums */
 +        velecsum         = _mm256_setzero_pd();
 +        vvdwsum          = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx30             = _mm256_sub_pd(ix3,jx0);
 +            dy30             = _mm256_sub_pd(iy3,jy0);
 +            dz30             = _mm256_sub_pd(iz3,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq30            = gmx_mm256_calc_rsq_pd(dx30,dy30,dz30);
 +
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv30           = gmx_mm256_invsqrt_pd(rsq30);
 +
 +            rinvsq00         = gmx_mm256_inv_pd(rsq00);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq30         = _mm256_mul_pd(rinv30,rinv30);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd(_mm256_mul_pd( _mm256_sub_pd(vvdw12 , _mm256_mul_pd(c12_00,_mm256_mul_pd(sh_vdw_invrcut6,sh_vdw_invrcut6))), one_twelfth) ,
 +                                          _mm256_mul_pd( _mm256_sub_pd(vvdw6,_mm256_mul_pd(c6_00,sh_vdw_invrcut6)),one_sixth));
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            vvdw             = _mm256_and_pd(vvdw,cutoff_mask);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = fvdw;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq10,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_add_pd(rinv10,_mm256_mul_pd(krf,rsq10)),crf));
 +            felec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_mul_pd(rinv10,rinvsq10),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq20,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_add_pd(rinv20,_mm256_mul_pd(krf,rsq20)),crf));
 +            felec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_mul_pd(rinv20,rinvsq20),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq30,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq30             = _mm256_mul_pd(iq3,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq30,_mm256_sub_pd(_mm256_add_pd(rinv30,_mm256_mul_pd(krf,rsq30)),crf));
 +            felec            = _mm256_mul_pd(qq30,_mm256_sub_pd(_mm256_mul_pd(rinv30,rinvsq30),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq30,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx30);
 +            ty               = _mm256_mul_pd(fscal,dy30);
 +            tz               = _mm256_mul_pd(fscal,dz30);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 152 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx30             = _mm256_sub_pd(ix3,jx0);
 +            dy30             = _mm256_sub_pd(iy3,jy0);
 +            dz30             = _mm256_sub_pd(iz3,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq30            = gmx_mm256_calc_rsq_pd(dx30,dy30,dz30);
 +
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv30           = gmx_mm256_invsqrt_pd(rsq30);
 +
 +            rinvsq00         = gmx_mm256_inv_pd(rsq00);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq30         = _mm256_mul_pd(rinv30,rinv30);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd(_mm256_mul_pd( _mm256_sub_pd(vvdw12 , _mm256_mul_pd(c12_00,_mm256_mul_pd(sh_vdw_invrcut6,sh_vdw_invrcut6))), one_twelfth) ,
 +                                          _mm256_mul_pd( _mm256_sub_pd(vvdw6,_mm256_mul_pd(c6_00,sh_vdw_invrcut6)),one_sixth));
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            vvdw             = _mm256_and_pd(vvdw,cutoff_mask);
 +            vvdw             = _mm256_andnot_pd(dummy_mask,vvdw);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = fvdw;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq10,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_add_pd(rinv10,_mm256_mul_pd(krf,rsq10)),crf));
 +            felec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_mul_pd(rinv10,rinvsq10),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq20,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_add_pd(rinv20,_mm256_mul_pd(krf,rsq20)),crf));
 +            felec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_mul_pd(rinv20,rinvsq20),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq30,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq30             = _mm256_mul_pd(iq3,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq30,_mm256_sub_pd(_mm256_add_pd(rinv30,_mm256_mul_pd(krf,rsq30)),crf));
 +            felec            = _mm256_mul_pd(qq30,_mm256_sub_pd(_mm256_mul_pd(rinv30,rinvsq30),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq30,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx30);
 +            ty               = _mm256_mul_pd(fscal,dy30);
 +            tz               = _mm256_mul_pd(fscal,dz30);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 152 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_4atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
 +        gmx_mm256_update_1pot_pd(vvdwsum,kernel_data->energygrp_vdw+ggid);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 26 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4_VF,outeriter*26 + inneriter*152);
 +}
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecRFCut_VdwLJSh_GeomW4P1_F_avx_256_double
 + * Electrostatics interaction: ReactionField
 + * VdW interaction:            LennardJones
 + * Geometry:                   Water4-Particle
 + * Calculate force/pot:        Force
 + */
 +void
 +nb_kernel_ElecRFCut_VdwLJSh_GeomW4P1_F_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    real *           vdwioffsetptr3;
 +    __m256d          ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          dx30,dy30,dz30,rsq30,rinv30,rinvsq30,r30,qq30,c6_30,c12_30;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    krf              = _mm256_set1_pd(fr->ic->k_rf);
 +    krf2             = _mm256_set1_pd(fr->ic->k_rf*2.0);
 +    crf              = _mm256_set1_pd(fr->ic->c_rf);
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    iq3              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+3]));
 +    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +    /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
 +    rcutoff_scalar   = fr->rcoulomb;
 +    rcutoff          = _mm256_set1_pd(rcutoff_scalar);
 +    rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);
 +
 +    sh_vdw_invrcut6  = _mm256_set1_pd(fr->ic->sh_invrc6);
 +    rvdw             = _mm256_set1_pd(fr->rvdw);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_4rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +        fix3             = _mm256_setzero_pd();
 +        fiy3             = _mm256_setzero_pd();
 +        fiz3             = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx30             = _mm256_sub_pd(ix3,jx0);
 +            dy30             = _mm256_sub_pd(iy3,jy0);
 +            dz30             = _mm256_sub_pd(iz3,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq30            = gmx_mm256_calc_rsq_pd(dx30,dy30,dz30);
 +
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv30           = gmx_mm256_invsqrt_pd(rsq30);
 +
 +            rinvsq00         = gmx_mm256_inv_pd(rsq00);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq30         = _mm256_mul_pd(rinv30,rinv30);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(_mm256_mul_pd(c12_00,rinvsix),c6_00),_mm256_mul_pd(rinvsix,rinvsq00));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = fvdw;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq10,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_mul_pd(rinv10,rinvsq10),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq20,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_mul_pd(rinv20,rinvsq20),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq30,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq30             = _mm256_mul_pd(iq3,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq30,_mm256_sub_pd(_mm256_mul_pd(rinv30,rinvsq30),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq30,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx30);
 +            ty               = _mm256_mul_pd(fscal,dy30);
 +            tz               = _mm256_mul_pd(fscal,dz30);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 123 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx30             = _mm256_sub_pd(ix3,jx0);
 +            dy30             = _mm256_sub_pd(iy3,jy0);
 +            dz30             = _mm256_sub_pd(iz3,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq30            = gmx_mm256_calc_rsq_pd(dx30,dy30,dz30);
 +
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv30           = gmx_mm256_invsqrt_pd(rsq30);
 +
 +            rinvsq00         = gmx_mm256_inv_pd(rsq00);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq30         = _mm256_mul_pd(rinv30,rinv30);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(_mm256_mul_pd(c12_00,rinvsix),c6_00),_mm256_mul_pd(rinvsix,rinvsq00));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = fvdw;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq10,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_mul_pd(rinv10,rinvsq10),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq20,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_mul_pd(rinv20,rinvsq20),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq30,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq30             = _mm256_mul_pd(iq3,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq30,_mm256_sub_pd(_mm256_mul_pd(rinv30,rinvsq30),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq30,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx30);
 +            ty               = _mm256_mul_pd(fscal,dy30);
 +            tz               = _mm256_mul_pd(fscal,dz30);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 123 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_4atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 24 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4_F,outeriter*24 + inneriter*123);
 +}
index 245f282a05c915993f9da58e6d80bf50663d0760,0000000000000000000000000000000000000000..8b559aaa018f25dd756de7eb6779e52fb6361f23
mode 100644,000000..100644
--- /dev/null
@@@ -1,2330 -1,0 +1,2330 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/*
 + * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2001-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the Free
 + * Software Foundation; either version 2 of the License, or (at your option) any
 + * later version.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website.
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecRFCut_VdwLJSh_GeomW4W4_VF_avx_256_double
 + * Electrostatics interaction: ReactionField
 + * VdW interaction:            LennardJones
 + * Geometry:                   Water4-Water4
 + * Calculate force/pot:        PotentialAndForce
 + */
 +void
 +nb_kernel_ElecRFCut_VdwLJSh_GeomW4W4_VF_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    real *           vdwioffsetptr3;
 +    __m256d          ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
 +    __m256d          jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
 +    int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
 +    __m256d          jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
 +    int              vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D;
 +    __m256d          jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
 +    __m256d          dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
 +    __m256d          dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
 +    __m256d          dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
 +    __m256d          dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
 +    __m256d          dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
 +    __m256d          dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
 +    __m256d          dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
 +    __m256d          dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    krf              = _mm256_set1_pd(fr->ic->k_rf);
 +    krf2             = _mm256_set1_pd(fr->ic->k_rf*2.0);
 +    crf              = _mm256_set1_pd(fr->ic->c_rf);
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    iq3              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+3]));
 +    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +    jq1              = _mm256_set1_pd(charge[inr+1]);
 +    jq2              = _mm256_set1_pd(charge[inr+2]);
 +    jq3              = _mm256_set1_pd(charge[inr+3]);
 +    vdwjidx0A        = 2*vdwtype[inr+0];
 +    c6_00            = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A]);
 +    c12_00           = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A+1]);
 +    qq11             = _mm256_mul_pd(iq1,jq1);
 +    qq12             = _mm256_mul_pd(iq1,jq2);
 +    qq13             = _mm256_mul_pd(iq1,jq3);
 +    qq21             = _mm256_mul_pd(iq2,jq1);
 +    qq22             = _mm256_mul_pd(iq2,jq2);
 +    qq23             = _mm256_mul_pd(iq2,jq3);
 +    qq31             = _mm256_mul_pd(iq3,jq1);
 +    qq32             = _mm256_mul_pd(iq3,jq2);
 +    qq33             = _mm256_mul_pd(iq3,jq3);
 +
 +    /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
 +    rcutoff_scalar   = fr->rcoulomb;
 +    rcutoff          = _mm256_set1_pd(rcutoff_scalar);
 +    rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);
 +
 +    sh_vdw_invrcut6  = _mm256_set1_pd(fr->ic->sh_invrc6);
 +    rvdw             = _mm256_set1_pd(fr->rvdw);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_4rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +        fix3             = _mm256_setzero_pd();
 +        fiy3             = _mm256_setzero_pd();
 +        fiz3             = _mm256_setzero_pd();
 +
 +        /* Reset potential sums */
 +        velecsum         = _mm256_setzero_pd();
 +        vvdwsum          = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_4rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
 +                                                 &jy2,&jz2,&jx3,&jy3,&jz3);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx13             = _mm256_sub_pd(ix1,jx3);
 +            dy13             = _mm256_sub_pd(iy1,jy3);
 +            dz13             = _mm256_sub_pd(iz1,jz3);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +            dx23             = _mm256_sub_pd(ix2,jx3);
 +            dy23             = _mm256_sub_pd(iy2,jy3);
 +            dz23             = _mm256_sub_pd(iz2,jz3);
 +            dx31             = _mm256_sub_pd(ix3,jx1);
 +            dy31             = _mm256_sub_pd(iy3,jy1);
 +            dz31             = _mm256_sub_pd(iz3,jz1);
 +            dx32             = _mm256_sub_pd(ix3,jx2);
 +            dy32             = _mm256_sub_pd(iy3,jy2);
 +            dz32             = _mm256_sub_pd(iz3,jz2);
 +            dx33             = _mm256_sub_pd(ix3,jx3);
 +            dy33             = _mm256_sub_pd(iy3,jy3);
 +            dz33             = _mm256_sub_pd(iz3,jz3);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq13            = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +            rsq23            = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
 +            rsq31            = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
 +            rsq32            = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
 +            rsq33            = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);
 +
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv13           = gmx_mm256_invsqrt_pd(rsq13);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +            rinv23           = gmx_mm256_invsqrt_pd(rsq23);
 +            rinv31           = gmx_mm256_invsqrt_pd(rsq31);
 +            rinv32           = gmx_mm256_invsqrt_pd(rsq32);
 +            rinv33           = gmx_mm256_invsqrt_pd(rsq33);
 +
 +            rinvsq00         = gmx_mm256_inv_pd(rsq00);
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq13         = _mm256_mul_pd(rinv13,rinv13);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +            rinvsq23         = _mm256_mul_pd(rinv23,rinv23);
 +            rinvsq31         = _mm256_mul_pd(rinv31,rinv31);
 +            rinvsq32         = _mm256_mul_pd(rinv32,rinv32);
 +            rinvsq33         = _mm256_mul_pd(rinv33,rinv33);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +            fjx3             = _mm256_setzero_pd();
 +            fjy3             = _mm256_setzero_pd();
 +            fjz3             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd(_mm256_mul_pd( _mm256_sub_pd(vvdw12 , _mm256_mul_pd(c12_00,_mm256_mul_pd(sh_vdw_invrcut6,sh_vdw_invrcut6))), one_twelfth) ,
 +                                          _mm256_mul_pd( _mm256_sub_pd(vvdw6,_mm256_mul_pd(c6_00,sh_vdw_invrcut6)),one_sixth));
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            vvdw             = _mm256_and_pd(vvdw,cutoff_mask);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = fvdw;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq11,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_add_pd(rinv11,_mm256_mul_pd(krf,rsq11)),crf));
 +            felec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_mul_pd(rinv11,rinvsq11),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq12,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_add_pd(rinv12,_mm256_mul_pd(krf,rsq12)),crf));
 +            felec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_mul_pd(rinv12,rinvsq12),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq13,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq13,_mm256_sub_pd(_mm256_add_pd(rinv13,_mm256_mul_pd(krf,rsq13)),crf));
 +            felec            = _mm256_mul_pd(qq13,_mm256_sub_pd(_mm256_mul_pd(rinv13,rinvsq13),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq13,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx13);
 +            ty               = _mm256_mul_pd(fscal,dy13);
 +            tz               = _mm256_mul_pd(fscal,dz13);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq21,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_add_pd(rinv21,_mm256_mul_pd(krf,rsq21)),crf));
 +            felec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_mul_pd(rinv21,rinvsq21),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq22,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_add_pd(rinv22,_mm256_mul_pd(krf,rsq22)),crf));
 +            felec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_mul_pd(rinv22,rinvsq22),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq23,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq23,_mm256_sub_pd(_mm256_add_pd(rinv23,_mm256_mul_pd(krf,rsq23)),crf));
 +            felec            = _mm256_mul_pd(qq23,_mm256_sub_pd(_mm256_mul_pd(rinv23,rinvsq23),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq23,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx23);
 +            ty               = _mm256_mul_pd(fscal,dy23);
 +            tz               = _mm256_mul_pd(fscal,dz23);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq31,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq31,_mm256_sub_pd(_mm256_add_pd(rinv31,_mm256_mul_pd(krf,rsq31)),crf));
 +            felec            = _mm256_mul_pd(qq31,_mm256_sub_pd(_mm256_mul_pd(rinv31,rinvsq31),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq31,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx31);
 +            ty               = _mm256_mul_pd(fscal,dy31);
 +            tz               = _mm256_mul_pd(fscal,dz31);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq32,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq32,_mm256_sub_pd(_mm256_add_pd(rinv32,_mm256_mul_pd(krf,rsq32)),crf));
 +            felec            = _mm256_mul_pd(qq32,_mm256_sub_pd(_mm256_mul_pd(rinv32,rinvsq32),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq32,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx32);
 +            ty               = _mm256_mul_pd(fscal,dy32);
 +            tz               = _mm256_mul_pd(fscal,dz32);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq33,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq33,_mm256_sub_pd(_mm256_add_pd(rinv33,_mm256_mul_pd(krf,rsq33)),crf));
 +            felec            = _mm256_mul_pd(qq33,_mm256_sub_pd(_mm256_mul_pd(rinv33,rinvsq33),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq33,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx33);
 +            ty               = _mm256_mul_pd(fscal,dy33);
 +            tz               = _mm256_mul_pd(fscal,dz33);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            }
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_4rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
 +                                                      fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
 +
 +            /* Inner loop uses 368 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_4rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
 +                                                 &jy2,&jz2,&jx3,&jy3,&jz3);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx13             = _mm256_sub_pd(ix1,jx3);
 +            dy13             = _mm256_sub_pd(iy1,jy3);
 +            dz13             = _mm256_sub_pd(iz1,jz3);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +            dx23             = _mm256_sub_pd(ix2,jx3);
 +            dy23             = _mm256_sub_pd(iy2,jy3);
 +            dz23             = _mm256_sub_pd(iz2,jz3);
 +            dx31             = _mm256_sub_pd(ix3,jx1);
 +            dy31             = _mm256_sub_pd(iy3,jy1);
 +            dz31             = _mm256_sub_pd(iz3,jz1);
 +            dx32             = _mm256_sub_pd(ix3,jx2);
 +            dy32             = _mm256_sub_pd(iy3,jy2);
 +            dz32             = _mm256_sub_pd(iz3,jz2);
 +            dx33             = _mm256_sub_pd(ix3,jx3);
 +            dy33             = _mm256_sub_pd(iy3,jy3);
 +            dz33             = _mm256_sub_pd(iz3,jz3);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq13            = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +            rsq23            = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
 +            rsq31            = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
 +            rsq32            = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
 +            rsq33            = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);
 +
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv13           = gmx_mm256_invsqrt_pd(rsq13);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +            rinv23           = gmx_mm256_invsqrt_pd(rsq23);
 +            rinv31           = gmx_mm256_invsqrt_pd(rsq31);
 +            rinv32           = gmx_mm256_invsqrt_pd(rsq32);
 +            rinv33           = gmx_mm256_invsqrt_pd(rsq33);
 +
 +            rinvsq00         = gmx_mm256_inv_pd(rsq00);
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq13         = _mm256_mul_pd(rinv13,rinv13);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +            rinvsq23         = _mm256_mul_pd(rinv23,rinv23);
 +            rinvsq31         = _mm256_mul_pd(rinv31,rinv31);
 +            rinvsq32         = _mm256_mul_pd(rinv32,rinv32);
 +            rinvsq33         = _mm256_mul_pd(rinv33,rinv33);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +            fjx3             = _mm256_setzero_pd();
 +            fjy3             = _mm256_setzero_pd();
 +            fjz3             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd(_mm256_mul_pd( _mm256_sub_pd(vvdw12 , _mm256_mul_pd(c12_00,_mm256_mul_pd(sh_vdw_invrcut6,sh_vdw_invrcut6))), one_twelfth) ,
 +                                          _mm256_mul_pd( _mm256_sub_pd(vvdw6,_mm256_mul_pd(c6_00,sh_vdw_invrcut6)),one_sixth));
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            vvdw             = _mm256_and_pd(vvdw,cutoff_mask);
 +            vvdw             = _mm256_andnot_pd(dummy_mask,vvdw);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = fvdw;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq11,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_add_pd(rinv11,_mm256_mul_pd(krf,rsq11)),crf));
 +            felec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_mul_pd(rinv11,rinvsq11),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq12,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_add_pd(rinv12,_mm256_mul_pd(krf,rsq12)),crf));
 +            felec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_mul_pd(rinv12,rinvsq12),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq13,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq13,_mm256_sub_pd(_mm256_add_pd(rinv13,_mm256_mul_pd(krf,rsq13)),crf));
 +            felec            = _mm256_mul_pd(qq13,_mm256_sub_pd(_mm256_mul_pd(rinv13,rinvsq13),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq13,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx13);
 +            ty               = _mm256_mul_pd(fscal,dy13);
 +            tz               = _mm256_mul_pd(fscal,dz13);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq21,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_add_pd(rinv21,_mm256_mul_pd(krf,rsq21)),crf));
 +            felec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_mul_pd(rinv21,rinvsq21),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq22,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_add_pd(rinv22,_mm256_mul_pd(krf,rsq22)),crf));
 +            felec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_mul_pd(rinv22,rinvsq22),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq23,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq23,_mm256_sub_pd(_mm256_add_pd(rinv23,_mm256_mul_pd(krf,rsq23)),crf));
 +            felec            = _mm256_mul_pd(qq23,_mm256_sub_pd(_mm256_mul_pd(rinv23,rinvsq23),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq23,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx23);
 +            ty               = _mm256_mul_pd(fscal,dy23);
 +            tz               = _mm256_mul_pd(fscal,dz23);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq31,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq31,_mm256_sub_pd(_mm256_add_pd(rinv31,_mm256_mul_pd(krf,rsq31)),crf));
 +            felec            = _mm256_mul_pd(qq31,_mm256_sub_pd(_mm256_mul_pd(rinv31,rinvsq31),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq31,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx31);
 +            ty               = _mm256_mul_pd(fscal,dy31);
 +            tz               = _mm256_mul_pd(fscal,dz31);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq32,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq32,_mm256_sub_pd(_mm256_add_pd(rinv32,_mm256_mul_pd(krf,rsq32)),crf));
 +            felec            = _mm256_mul_pd(qq32,_mm256_sub_pd(_mm256_mul_pd(rinv32,rinvsq32),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq32,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx32);
 +            ty               = _mm256_mul_pd(fscal,dy32);
 +            tz               = _mm256_mul_pd(fscal,dz32);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq33,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq33,_mm256_sub_pd(_mm256_add_pd(rinv33,_mm256_mul_pd(krf,rsq33)),crf));
 +            felec            = _mm256_mul_pd(qq33,_mm256_sub_pd(_mm256_mul_pd(rinv33,rinvsq33),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq33,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx33);
 +            ty               = _mm256_mul_pd(fscal,dy33);
 +            tz               = _mm256_mul_pd(fscal,dz33);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            }
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_4rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
 +                                                      fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
 +
 +            /* Inner loop uses 368 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_4atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
 +        gmx_mm256_update_1pot_pd(vvdwsum,kernel_data->energygrp_vdw+ggid);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 26 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4W4_VF,outeriter*26 + inneriter*368);
 +}
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecRFCut_VdwLJSh_GeomW4W4_F_avx_256_double
 + * Electrostatics interaction: ReactionField
 + * VdW interaction:            LennardJones
 + * Geometry:                   Water4-Water4
 + * Calculate force/pot:        Force
 + */
 +void
 +nb_kernel_ElecRFCut_VdwLJSh_GeomW4W4_F_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    real *           vdwioffsetptr3;
 +    __m256d          ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
 +    __m256d          jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
 +    int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
 +    __m256d          jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
 +    int              vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D;
 +    __m256d          jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
 +    __m256d          dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
 +    __m256d          dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
 +    __m256d          dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
 +    __m256d          dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
 +    __m256d          dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
 +    __m256d          dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
 +    __m256d          dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
 +    __m256d          dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    krf              = _mm256_set1_pd(fr->ic->k_rf);
 +    krf2             = _mm256_set1_pd(fr->ic->k_rf*2.0);
 +    crf              = _mm256_set1_pd(fr->ic->c_rf);
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    iq3              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+3]));
 +    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +    jq1              = _mm256_set1_pd(charge[inr+1]);
 +    jq2              = _mm256_set1_pd(charge[inr+2]);
 +    jq3              = _mm256_set1_pd(charge[inr+3]);
 +    vdwjidx0A        = 2*vdwtype[inr+0];
 +    c6_00            = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A]);
 +    c12_00           = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A+1]);
 +    qq11             = _mm256_mul_pd(iq1,jq1);
 +    qq12             = _mm256_mul_pd(iq1,jq2);
 +    qq13             = _mm256_mul_pd(iq1,jq3);
 +    qq21             = _mm256_mul_pd(iq2,jq1);
 +    qq22             = _mm256_mul_pd(iq2,jq2);
 +    qq23             = _mm256_mul_pd(iq2,jq3);
 +    qq31             = _mm256_mul_pd(iq3,jq1);
 +    qq32             = _mm256_mul_pd(iq3,jq2);
 +    qq33             = _mm256_mul_pd(iq3,jq3);
 +
 +    /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
 +    rcutoff_scalar   = fr->rcoulomb;
 +    rcutoff          = _mm256_set1_pd(rcutoff_scalar);
 +    rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);
 +
 +    sh_vdw_invrcut6  = _mm256_set1_pd(fr->ic->sh_invrc6);
 +    rvdw             = _mm256_set1_pd(fr->rvdw);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_4rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +        fix3             = _mm256_setzero_pd();
 +        fiy3             = _mm256_setzero_pd();
 +        fiz3             = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_4rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
 +                                                 &jy2,&jz2,&jx3,&jy3,&jz3);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx13             = _mm256_sub_pd(ix1,jx3);
 +            dy13             = _mm256_sub_pd(iy1,jy3);
 +            dz13             = _mm256_sub_pd(iz1,jz3);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +            dx23             = _mm256_sub_pd(ix2,jx3);
 +            dy23             = _mm256_sub_pd(iy2,jy3);
 +            dz23             = _mm256_sub_pd(iz2,jz3);
 +            dx31             = _mm256_sub_pd(ix3,jx1);
 +            dy31             = _mm256_sub_pd(iy3,jy1);
 +            dz31             = _mm256_sub_pd(iz3,jz1);
 +            dx32             = _mm256_sub_pd(ix3,jx2);
 +            dy32             = _mm256_sub_pd(iy3,jy2);
 +            dz32             = _mm256_sub_pd(iz3,jz2);
 +            dx33             = _mm256_sub_pd(ix3,jx3);
 +            dy33             = _mm256_sub_pd(iy3,jy3);
 +            dz33             = _mm256_sub_pd(iz3,jz3);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq13            = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +            rsq23            = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
 +            rsq31            = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
 +            rsq32            = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
 +            rsq33            = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);
 +
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv13           = gmx_mm256_invsqrt_pd(rsq13);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +            rinv23           = gmx_mm256_invsqrt_pd(rsq23);
 +            rinv31           = gmx_mm256_invsqrt_pd(rsq31);
 +            rinv32           = gmx_mm256_invsqrt_pd(rsq32);
 +            rinv33           = gmx_mm256_invsqrt_pd(rsq33);
 +
 +            rinvsq00         = gmx_mm256_inv_pd(rsq00);
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq13         = _mm256_mul_pd(rinv13,rinv13);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +            rinvsq23         = _mm256_mul_pd(rinv23,rinv23);
 +            rinvsq31         = _mm256_mul_pd(rinv31,rinv31);
 +            rinvsq32         = _mm256_mul_pd(rinv32,rinv32);
 +            rinvsq33         = _mm256_mul_pd(rinv33,rinv33);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +            fjx3             = _mm256_setzero_pd();
 +            fjy3             = _mm256_setzero_pd();
 +            fjz3             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(_mm256_mul_pd(c12_00,rinvsix),c6_00),_mm256_mul_pd(rinvsix,rinvsq00));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = fvdw;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq11,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_mul_pd(rinv11,rinvsq11),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq12,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_mul_pd(rinv12,rinvsq12),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq13,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq13,_mm256_sub_pd(_mm256_mul_pd(rinv13,rinvsq13),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq13,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx13);
 +            ty               = _mm256_mul_pd(fscal,dy13);
 +            tz               = _mm256_mul_pd(fscal,dz13);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq21,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_mul_pd(rinv21,rinvsq21),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq22,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_mul_pd(rinv22,rinvsq22),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq23,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq23,_mm256_sub_pd(_mm256_mul_pd(rinv23,rinvsq23),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq23,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx23);
 +            ty               = _mm256_mul_pd(fscal,dy23);
 +            tz               = _mm256_mul_pd(fscal,dz23);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq31,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq31,_mm256_sub_pd(_mm256_mul_pd(rinv31,rinvsq31),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq31,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx31);
 +            ty               = _mm256_mul_pd(fscal,dy31);
 +            tz               = _mm256_mul_pd(fscal,dz31);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq32,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq32,_mm256_sub_pd(_mm256_mul_pd(rinv32,rinvsq32),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq32,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx32);
 +            ty               = _mm256_mul_pd(fscal,dy32);
 +            tz               = _mm256_mul_pd(fscal,dz32);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq33,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq33,_mm256_sub_pd(_mm256_mul_pd(rinv33,rinvsq33),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq33,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx33);
 +            ty               = _mm256_mul_pd(fscal,dy33);
 +            tz               = _mm256_mul_pd(fscal,dz33);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            }
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_4rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
 +                                                      fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
 +
 +            /* Inner loop uses 303 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_4rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
 +                                                 &jy2,&jz2,&jx3,&jy3,&jz3);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx13             = _mm256_sub_pd(ix1,jx3);
 +            dy13             = _mm256_sub_pd(iy1,jy3);
 +            dz13             = _mm256_sub_pd(iz1,jz3);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +            dx23             = _mm256_sub_pd(ix2,jx3);
 +            dy23             = _mm256_sub_pd(iy2,jy3);
 +            dz23             = _mm256_sub_pd(iz2,jz3);
 +            dx31             = _mm256_sub_pd(ix3,jx1);
 +            dy31             = _mm256_sub_pd(iy3,jy1);
 +            dz31             = _mm256_sub_pd(iz3,jz1);
 +            dx32             = _mm256_sub_pd(ix3,jx2);
 +            dy32             = _mm256_sub_pd(iy3,jy2);
 +            dz32             = _mm256_sub_pd(iz3,jz2);
 +            dx33             = _mm256_sub_pd(ix3,jx3);
 +            dy33             = _mm256_sub_pd(iy3,jy3);
 +            dz33             = _mm256_sub_pd(iz3,jz3);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq13            = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +            rsq23            = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
 +            rsq31            = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
 +            rsq32            = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
 +            rsq33            = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);
 +
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv13           = gmx_mm256_invsqrt_pd(rsq13);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +            rinv23           = gmx_mm256_invsqrt_pd(rsq23);
 +            rinv31           = gmx_mm256_invsqrt_pd(rsq31);
 +            rinv32           = gmx_mm256_invsqrt_pd(rsq32);
 +            rinv33           = gmx_mm256_invsqrt_pd(rsq33);
 +
 +            rinvsq00         = gmx_mm256_inv_pd(rsq00);
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq13         = _mm256_mul_pd(rinv13,rinv13);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +            rinvsq23         = _mm256_mul_pd(rinv23,rinv23);
 +            rinvsq31         = _mm256_mul_pd(rinv31,rinv31);
 +            rinvsq32         = _mm256_mul_pd(rinv32,rinv32);
 +            rinvsq33         = _mm256_mul_pd(rinv33,rinv33);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +            fjx3             = _mm256_setzero_pd();
 +            fjy3             = _mm256_setzero_pd();
 +            fjz3             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(_mm256_mul_pd(c12_00,rinvsix),c6_00),_mm256_mul_pd(rinvsix,rinvsq00));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = fvdw;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq11,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_mul_pd(rinv11,rinvsq11),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq12,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_mul_pd(rinv12,rinvsq12),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq13,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq13,_mm256_sub_pd(_mm256_mul_pd(rinv13,rinvsq13),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq13,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx13);
 +            ty               = _mm256_mul_pd(fscal,dy13);
 +            tz               = _mm256_mul_pd(fscal,dz13);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq21,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_mul_pd(rinv21,rinvsq21),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq22,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_mul_pd(rinv22,rinvsq22),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq23,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq23,_mm256_sub_pd(_mm256_mul_pd(rinv23,rinvsq23),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq23,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx23);
 +            ty               = _mm256_mul_pd(fscal,dy23);
 +            tz               = _mm256_mul_pd(fscal,dz23);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq31,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq31,_mm256_sub_pd(_mm256_mul_pd(rinv31,rinvsq31),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq31,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx31);
 +            ty               = _mm256_mul_pd(fscal,dy31);
 +            tz               = _mm256_mul_pd(fscal,dz31);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq32,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq32,_mm256_sub_pd(_mm256_mul_pd(rinv32,rinvsq32),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq32,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx32);
 +            ty               = _mm256_mul_pd(fscal,dy32);
 +            tz               = _mm256_mul_pd(fscal,dz32);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq33,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq33,_mm256_sub_pd(_mm256_mul_pd(rinv33,rinvsq33),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq33,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx33);
 +            ty               = _mm256_mul_pd(fscal,dy33);
 +            tz               = _mm256_mul_pd(fscal,dz33);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            }
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_4rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
 +                                                      fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
 +
 +            /* Inner loop uses 303 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_4atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 24 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4W4_F,outeriter*24 + inneriter*303);
 +}
index 6bf8d2f90bdb64540014f6d80bd132a6bc21ae2f,0000000000000000000000000000000000000000..1969fd42d5d975e29e0238b1d43590dff2ea16ce
mode 100644,000000..100644
--- /dev/null
@@@ -1,808 -1,0 +1,808 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/*
 + * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2001-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the Free
 + * Software Foundation; either version 2 of the License, or (at your option) any
 + * later version.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website.
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecRFCut_VdwLJSw_GeomP1P1_VF_avx_256_double
 + * Electrostatics interaction: ReactionField
 + * VdW interaction:            LennardJones
 + * Geometry:                   Particle-Particle
 + * Calculate force/pot:        PotentialAndForce
 + */
 +void
 +nb_kernel_ElecRFCut_VdwLJSw_GeomP1P1_VF_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m256d          rswitch,swV3,swV4,swV5,swF2,swF3,swF4,d,d2,sw,dsw;
 +    real             rswitch_scalar,d_scalar;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    krf              = _mm256_set1_pd(fr->ic->k_rf);
 +    krf2             = _mm256_set1_pd(fr->ic->k_rf*2.0);
 +    crf              = _mm256_set1_pd(fr->ic->c_rf);
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
 +    rcutoff_scalar   = fr->rcoulomb;
 +    rcutoff          = _mm256_set1_pd(rcutoff_scalar);
 +    rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);
 +
 +    rswitch_scalar   = fr->rvdw_switch;
 +    rswitch          = _mm256_set1_pd(rswitch_scalar);
 +    /* Setup switch parameters */
 +    d_scalar         = rcutoff_scalar-rswitch_scalar;
 +    d                = _mm256_set1_pd(d_scalar);
 +    swV3             = _mm256_set1_pd(-10.0/(d_scalar*d_scalar*d_scalar));
 +    swV4             = _mm256_set1_pd( 15.0/(d_scalar*d_scalar*d_scalar*d_scalar));
 +    swV5             = _mm256_set1_pd( -6.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
 +    swF2             = _mm256_set1_pd(-30.0/(d_scalar*d_scalar*d_scalar));
 +    swF3             = _mm256_set1_pd( 60.0/(d_scalar*d_scalar*d_scalar*d_scalar));
 +    swF4             = _mm256_set1_pd(-30.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_1rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +
 +        /* Load parameters for i particles */
 +        iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +        vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +        /* Reset potential sums */
 +        velecsum         = _mm256_setzero_pd();
 +        vvdwsum          = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_add_pd(rinv00,_mm256_mul_pd(krf,rsq00)),crf));
 +            felec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_mul_pd(rinv00,rinvsq00),krf2));
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            d                = _mm256_sub_pd(r00,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            fvdw             = _mm256_sub_pd( _mm256_mul_pd(fvdw,sw) , _mm256_mul_pd(rinv00,_mm256_mul_pd(vvdw,dsw)) );
 +            vvdw             = _mm256_mul_pd(vvdw,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +            vvdw             = _mm256_and_pd(vvdw,cutoff_mask);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
 +
 +            }
 +
 +            /* Inner loop uses 70 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_add_pd(rinv00,_mm256_mul_pd(krf,rsq00)),crf));
 +            felec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_mul_pd(rinv00,rinvsq00),krf2));
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            d                = _mm256_sub_pd(r00,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            fvdw             = _mm256_sub_pd( _mm256_mul_pd(fvdw,sw) , _mm256_mul_pd(rinv00,_mm256_mul_pd(vvdw,dsw)) );
 +            vvdw             = _mm256_mul_pd(vvdw,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +            vvdw             = _mm256_and_pd(vvdw,cutoff_mask);
 +            vvdw             = _mm256_andnot_pd(dummy_mask,vvdw);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
 +
 +            }
 +
 +            /* Inner loop uses 71 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_1atom_swizzle_pd(fix0,fiy0,fiz0,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
 +        gmx_mm256_update_1pot_pd(vvdwsum,kernel_data->energygrp_vdw+ggid);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 9 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_VF,outeriter*9 + inneriter*71);
 +}
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecRFCut_VdwLJSw_GeomP1P1_F_avx_256_double
 + * Electrostatics interaction: ReactionField
 + * VdW interaction:            LennardJones
 + * Geometry:                   Particle-Particle
 + * Calculate force/pot:        Force
 + */
 +void
 +nb_kernel_ElecRFCut_VdwLJSw_GeomP1P1_F_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m256d          rswitch,swV3,swV4,swV5,swF2,swF3,swF4,d,d2,sw,dsw;
 +    real             rswitch_scalar,d_scalar;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    krf              = _mm256_set1_pd(fr->ic->k_rf);
 +    krf2             = _mm256_set1_pd(fr->ic->k_rf*2.0);
 +    crf              = _mm256_set1_pd(fr->ic->c_rf);
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
 +    rcutoff_scalar   = fr->rcoulomb;
 +    rcutoff          = _mm256_set1_pd(rcutoff_scalar);
 +    rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);
 +
 +    rswitch_scalar   = fr->rvdw_switch;
 +    rswitch          = _mm256_set1_pd(rswitch_scalar);
 +    /* Setup switch parameters */
 +    d_scalar         = rcutoff_scalar-rswitch_scalar;
 +    d                = _mm256_set1_pd(d_scalar);
 +    swV3             = _mm256_set1_pd(-10.0/(d_scalar*d_scalar*d_scalar));
 +    swV4             = _mm256_set1_pd( 15.0/(d_scalar*d_scalar*d_scalar*d_scalar));
 +    swV5             = _mm256_set1_pd( -6.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
 +    swF2             = _mm256_set1_pd(-30.0/(d_scalar*d_scalar*d_scalar));
 +    swF3             = _mm256_set1_pd( 60.0/(d_scalar*d_scalar*d_scalar*d_scalar));
 +    swF4             = _mm256_set1_pd(-30.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_1rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +
 +        /* Load parameters for i particles */
 +        iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +        vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_mul_pd(rinv00,rinvsq00),krf2));
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            d                = _mm256_sub_pd(r00,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            fvdw             = _mm256_sub_pd( _mm256_mul_pd(fvdw,sw) , _mm256_mul_pd(rinv00,_mm256_mul_pd(vvdw,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
 +
 +            }
 +
 +            /* Inner loop uses 61 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_mul_pd(rinv00,rinvsq00),krf2));
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            d                = _mm256_sub_pd(r00,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            fvdw             = _mm256_sub_pd( _mm256_mul_pd(fvdw,sw) , _mm256_mul_pd(rinv00,_mm256_mul_pd(vvdw,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
 +
 +            }
 +
 +            /* Inner loop uses 62 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_1atom_swizzle_pd(fix0,fiy0,fiz0,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 7 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_F,outeriter*7 + inneriter*62);
 +}
index 54621eea3570059239f2374576a5b90c3f93ce43,0000000000000000000000000000000000000000..2ac4ec82cd6c64b18b1c5a0da1149eb75adb7d30
mode 100644,000000..100644
--- /dev/null
@@@ -1,1234 -1,0 +1,1234 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/*
 + * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2001-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the Free
 + * Software Foundation; either version 2 of the License, or (at your option) any
 + * later version.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website.
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecRFCut_VdwLJSw_GeomW3P1_VF_avx_256_double
 + * Electrostatics interaction: ReactionField
 + * VdW interaction:            LennardJones
 + * Geometry:                   Water3-Particle
 + * Calculate force/pot:        PotentialAndForce
 + */
 +void
 +nb_kernel_ElecRFCut_VdwLJSw_GeomW3P1_VF_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m256d          rswitch,swV3,swV4,swV5,swF2,swF3,swF4,d,d2,sw,dsw;
 +    real             rswitch_scalar,d_scalar;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    krf              = _mm256_set1_pd(fr->ic->k_rf);
 +    krf2             = _mm256_set1_pd(fr->ic->k_rf*2.0);
 +    crf              = _mm256_set1_pd(fr->ic->c_rf);
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +    /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
 +    rcutoff_scalar   = fr->rcoulomb;
 +    rcutoff          = _mm256_set1_pd(rcutoff_scalar);
 +    rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);
 +
 +    rswitch_scalar   = fr->rvdw_switch;
 +    rswitch          = _mm256_set1_pd(rswitch_scalar);
 +    /* Setup switch parameters */
 +    d_scalar         = rcutoff_scalar-rswitch_scalar;
 +    d                = _mm256_set1_pd(d_scalar);
 +    swV3             = _mm256_set1_pd(-10.0/(d_scalar*d_scalar*d_scalar));
 +    swV4             = _mm256_set1_pd( 15.0/(d_scalar*d_scalar*d_scalar*d_scalar));
 +    swV5             = _mm256_set1_pd( -6.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
 +    swF2             = _mm256_set1_pd(-30.0/(d_scalar*d_scalar*d_scalar));
 +    swF3             = _mm256_set1_pd( 60.0/(d_scalar*d_scalar*d_scalar*d_scalar));
 +    swF4             = _mm256_set1_pd(-30.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +
 +        /* Reset potential sums */
 +        velecsum         = _mm256_setzero_pd();
 +        vvdwsum          = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_add_pd(rinv00,_mm256_mul_pd(krf,rsq00)),crf));
 +            felec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_mul_pd(rinv00,rinvsq00),krf2));
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            d                = _mm256_sub_pd(r00,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            fvdw             = _mm256_sub_pd( _mm256_mul_pd(fvdw,sw) , _mm256_mul_pd(rinv00,_mm256_mul_pd(vvdw,dsw)) );
 +            vvdw             = _mm256_mul_pd(vvdw,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +            vvdw             = _mm256_and_pd(vvdw,cutoff_mask);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq10,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_add_pd(rinv10,_mm256_mul_pd(krf,rsq10)),crf));
 +            felec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_mul_pd(rinv10,rinvsq10),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq20,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_add_pd(rinv20,_mm256_mul_pd(krf,rsq20)),crf));
 +            felec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_mul_pd(rinv20,rinvsq20),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 145 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_add_pd(rinv00,_mm256_mul_pd(krf,rsq00)),crf));
 +            felec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_mul_pd(rinv00,rinvsq00),krf2));
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            d                = _mm256_sub_pd(r00,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            fvdw             = _mm256_sub_pd( _mm256_mul_pd(fvdw,sw) , _mm256_mul_pd(rinv00,_mm256_mul_pd(vvdw,dsw)) );
 +            vvdw             = _mm256_mul_pd(vvdw,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +            vvdw             = _mm256_and_pd(vvdw,cutoff_mask);
 +            vvdw             = _mm256_andnot_pd(dummy_mask,vvdw);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq10,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_add_pd(rinv10,_mm256_mul_pd(krf,rsq10)),crf));
 +            felec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_mul_pd(rinv10,rinvsq10),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq20,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_add_pd(rinv20,_mm256_mul_pd(krf,rsq20)),crf));
 +            felec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_mul_pd(rinv20,rinvsq20),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 146 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
 +        gmx_mm256_update_1pot_pd(vvdwsum,kernel_data->energygrp_vdw+ggid);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 20 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3_VF,outeriter*20 + inneriter*146);
 +}
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecRFCut_VdwLJSw_GeomW3P1_F_avx_256_double
 + * Electrostatics interaction: ReactionField
 + * VdW interaction:            LennardJones
 + * Geometry:                   Water3-Particle
 + * Calculate force/pot:        Force
 + */
 +void
 +nb_kernel_ElecRFCut_VdwLJSw_GeomW3P1_F_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m256d          rswitch,swV3,swV4,swV5,swF2,swF3,swF4,d,d2,sw,dsw;
 +    real             rswitch_scalar,d_scalar;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    krf              = _mm256_set1_pd(fr->ic->k_rf);
 +    krf2             = _mm256_set1_pd(fr->ic->k_rf*2.0);
 +    crf              = _mm256_set1_pd(fr->ic->c_rf);
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +    /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
 +    rcutoff_scalar   = fr->rcoulomb;
 +    rcutoff          = _mm256_set1_pd(rcutoff_scalar);
 +    rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);
 +
 +    rswitch_scalar   = fr->rvdw_switch;
 +    rswitch          = _mm256_set1_pd(rswitch_scalar);
 +    /* Setup switch parameters */
 +    d_scalar         = rcutoff_scalar-rswitch_scalar;
 +    d                = _mm256_set1_pd(d_scalar);
 +    swV3             = _mm256_set1_pd(-10.0/(d_scalar*d_scalar*d_scalar));
 +    swV4             = _mm256_set1_pd( 15.0/(d_scalar*d_scalar*d_scalar*d_scalar));
 +    swV5             = _mm256_set1_pd( -6.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
 +    swF2             = _mm256_set1_pd(-30.0/(d_scalar*d_scalar*d_scalar));
 +    swF3             = _mm256_set1_pd( 60.0/(d_scalar*d_scalar*d_scalar*d_scalar));
 +    swF4             = _mm256_set1_pd(-30.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_mul_pd(rinv00,rinvsq00),krf2));
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            d                = _mm256_sub_pd(r00,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            fvdw             = _mm256_sub_pd( _mm256_mul_pd(fvdw,sw) , _mm256_mul_pd(rinv00,_mm256_mul_pd(vvdw,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq10,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_mul_pd(rinv10,rinvsq10),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq20,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_mul_pd(rinv20,rinvsq20),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 124 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_mul_pd(rinv00,rinvsq00),krf2));
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            d                = _mm256_sub_pd(r00,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            fvdw             = _mm256_sub_pd( _mm256_mul_pd(fvdw,sw) , _mm256_mul_pd(rinv00,_mm256_mul_pd(vvdw,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq10,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_mul_pd(rinv10,rinvsq10),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq20,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_mul_pd(rinv20,rinvsq20),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 125 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 18 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3_F,outeriter*18 + inneriter*125);
 +}
index 3f68985c13da9448f466560abe438f72e27fb834,0000000000000000000000000000000000000000..3536c0c6d5cff9b2be596a75ab30a87c1be32e32
mode 100644,000000..100644
--- /dev/null
@@@ -1,2228 -1,0 +1,2228 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/*
 + * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2001-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the Free
 + * Software Foundation; either version 2 of the License, or (at your option) any
 + * later version.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website.
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecRFCut_VdwLJSw_GeomW3W3_VF_avx_256_double
 + * Electrostatics interaction: ReactionField
 + * VdW interaction:            LennardJones
 + * Geometry:                   Water3-Water3
 + * Calculate force/pot:        PotentialAndForce
 + */
 +void
 +nb_kernel_ElecRFCut_VdwLJSw_GeomW3W3_VF_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
 +    __m256d          jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
 +    int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
 +    __m256d          jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
 +    __m256d          dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
 +    __m256d          dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
 +    __m256d          dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m256d          rswitch,swV3,swV4,swV5,swF2,swF3,swF4,d,d2,sw,dsw;
 +    real             rswitch_scalar,d_scalar;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    krf              = _mm256_set1_pd(fr->ic->k_rf);
 +    krf2             = _mm256_set1_pd(fr->ic->k_rf*2.0);
 +    crf              = _mm256_set1_pd(fr->ic->c_rf);
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +    jq0              = _mm256_set1_pd(charge[inr+0]);
 +    jq1              = _mm256_set1_pd(charge[inr+1]);
 +    jq2              = _mm256_set1_pd(charge[inr+2]);
 +    vdwjidx0A        = 2*vdwtype[inr+0];
 +    qq00             = _mm256_mul_pd(iq0,jq0);
 +    c6_00            = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A]);
 +    c12_00           = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A+1]);
 +    qq01             = _mm256_mul_pd(iq0,jq1);
 +    qq02             = _mm256_mul_pd(iq0,jq2);
 +    qq10             = _mm256_mul_pd(iq1,jq0);
 +    qq11             = _mm256_mul_pd(iq1,jq1);
 +    qq12             = _mm256_mul_pd(iq1,jq2);
 +    qq20             = _mm256_mul_pd(iq2,jq0);
 +    qq21             = _mm256_mul_pd(iq2,jq1);
 +    qq22             = _mm256_mul_pd(iq2,jq2);
 +
 +    /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
 +    rcutoff_scalar   = fr->rcoulomb;
 +    rcutoff          = _mm256_set1_pd(rcutoff_scalar);
 +    rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);
 +
 +    rswitch_scalar   = fr->rvdw_switch;
 +    rswitch          = _mm256_set1_pd(rswitch_scalar);
 +    /* Setup switch parameters */
 +    d_scalar         = rcutoff_scalar-rswitch_scalar;
 +    d                = _mm256_set1_pd(d_scalar);
 +    swV3             = _mm256_set1_pd(-10.0/(d_scalar*d_scalar*d_scalar));
 +    swV4             = _mm256_set1_pd( 15.0/(d_scalar*d_scalar*d_scalar*d_scalar));
 +    swV5             = _mm256_set1_pd( -6.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
 +    swF2             = _mm256_set1_pd(-30.0/(d_scalar*d_scalar*d_scalar));
 +    swF3             = _mm256_set1_pd( 60.0/(d_scalar*d_scalar*d_scalar*d_scalar));
 +    swF4             = _mm256_set1_pd(-30.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +
 +        /* Reset potential sums */
 +        velecsum         = _mm256_setzero_pd();
 +        vvdwsum          = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                              &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx01             = _mm256_sub_pd(ix0,jx1);
 +            dy01             = _mm256_sub_pd(iy0,jy1);
 +            dz01             = _mm256_sub_pd(iz0,jz1);
 +            dx02             = _mm256_sub_pd(ix0,jx2);
 +            dy02             = _mm256_sub_pd(iy0,jy2);
 +            dz02             = _mm256_sub_pd(iz0,jz2);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
 +            rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv01           = gmx_mm256_invsqrt_pd(rsq01);
 +            rinv02           = gmx_mm256_invsqrt_pd(rsq02);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq01         = _mm256_mul_pd(rinv01,rinv01);
 +            rinvsq02         = _mm256_mul_pd(rinv02,rinv02);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_add_pd(rinv00,_mm256_mul_pd(krf,rsq00)),crf));
 +            felec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_mul_pd(rinv00,rinvsq00),krf2));
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            d                = _mm256_sub_pd(r00,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            fvdw             = _mm256_sub_pd( _mm256_mul_pd(fvdw,sw) , _mm256_mul_pd(rinv00,_mm256_mul_pd(vvdw,dsw)) );
 +            vvdw             = _mm256_mul_pd(vvdw,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +            vvdw             = _mm256_and_pd(vvdw,cutoff_mask);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq01,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq01,_mm256_sub_pd(_mm256_add_pd(rinv01,_mm256_mul_pd(krf,rsq01)),crf));
 +            felec            = _mm256_mul_pd(qq01,_mm256_sub_pd(_mm256_mul_pd(rinv01,rinvsq01),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq01,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx01);
 +            ty               = _mm256_mul_pd(fscal,dy01);
 +            tz               = _mm256_mul_pd(fscal,dz01);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq02,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq02,_mm256_sub_pd(_mm256_add_pd(rinv02,_mm256_mul_pd(krf,rsq02)),crf));
 +            felec            = _mm256_mul_pd(qq02,_mm256_sub_pd(_mm256_mul_pd(rinv02,rinvsq02),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq02,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx02);
 +            ty               = _mm256_mul_pd(fscal,dy02);
 +            tz               = _mm256_mul_pd(fscal,dz02);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq10,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_add_pd(rinv10,_mm256_mul_pd(krf,rsq10)),crf));
 +            felec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_mul_pd(rinv10,rinvsq10),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq11,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_add_pd(rinv11,_mm256_mul_pd(krf,rsq11)),crf));
 +            felec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_mul_pd(rinv11,rinvsq11),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq12,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_add_pd(rinv12,_mm256_mul_pd(krf,rsq12)),crf));
 +            felec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_mul_pd(rinv12,rinvsq12),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq20,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_add_pd(rinv20,_mm256_mul_pd(krf,rsq20)),crf));
 +            felec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_mul_pd(rinv20,rinvsq20),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq21,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_add_pd(rinv21,_mm256_mul_pd(krf,rsq21)),crf));
 +            felec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_mul_pd(rinv21,rinvsq21),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq22,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_add_pd(rinv22,_mm256_mul_pd(krf,rsq22)),crf));
 +            felec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_mul_pd(rinv22,rinvsq22),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
 +
 +            /* Inner loop uses 358 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                              &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx01             = _mm256_sub_pd(ix0,jx1);
 +            dy01             = _mm256_sub_pd(iy0,jy1);
 +            dz01             = _mm256_sub_pd(iz0,jz1);
 +            dx02             = _mm256_sub_pd(ix0,jx2);
 +            dy02             = _mm256_sub_pd(iy0,jy2);
 +            dz02             = _mm256_sub_pd(iz0,jz2);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
 +            rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv01           = gmx_mm256_invsqrt_pd(rsq01);
 +            rinv02           = gmx_mm256_invsqrt_pd(rsq02);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq01         = _mm256_mul_pd(rinv01,rinv01);
 +            rinvsq02         = _mm256_mul_pd(rinv02,rinv02);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_add_pd(rinv00,_mm256_mul_pd(krf,rsq00)),crf));
 +            felec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_mul_pd(rinv00,rinvsq00),krf2));
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            d                = _mm256_sub_pd(r00,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            fvdw             = _mm256_sub_pd( _mm256_mul_pd(fvdw,sw) , _mm256_mul_pd(rinv00,_mm256_mul_pd(vvdw,dsw)) );
 +            vvdw             = _mm256_mul_pd(vvdw,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +            vvdw             = _mm256_and_pd(vvdw,cutoff_mask);
 +            vvdw             = _mm256_andnot_pd(dummy_mask,vvdw);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq01,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq01,_mm256_sub_pd(_mm256_add_pd(rinv01,_mm256_mul_pd(krf,rsq01)),crf));
 +            felec            = _mm256_mul_pd(qq01,_mm256_sub_pd(_mm256_mul_pd(rinv01,rinvsq01),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq01,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx01);
 +            ty               = _mm256_mul_pd(fscal,dy01);
 +            tz               = _mm256_mul_pd(fscal,dz01);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq02,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq02,_mm256_sub_pd(_mm256_add_pd(rinv02,_mm256_mul_pd(krf,rsq02)),crf));
 +            felec            = _mm256_mul_pd(qq02,_mm256_sub_pd(_mm256_mul_pd(rinv02,rinvsq02),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq02,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx02);
 +            ty               = _mm256_mul_pd(fscal,dy02);
 +            tz               = _mm256_mul_pd(fscal,dz02);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq10,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_add_pd(rinv10,_mm256_mul_pd(krf,rsq10)),crf));
 +            felec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_mul_pd(rinv10,rinvsq10),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq11,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_add_pd(rinv11,_mm256_mul_pd(krf,rsq11)),crf));
 +            felec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_mul_pd(rinv11,rinvsq11),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq12,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_add_pd(rinv12,_mm256_mul_pd(krf,rsq12)),crf));
 +            felec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_mul_pd(rinv12,rinvsq12),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq20,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_add_pd(rinv20,_mm256_mul_pd(krf,rsq20)),crf));
 +            felec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_mul_pd(rinv20,rinvsq20),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq21,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_add_pd(rinv21,_mm256_mul_pd(krf,rsq21)),crf));
 +            felec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_mul_pd(rinv21,rinvsq21),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq22,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_add_pd(rinv22,_mm256_mul_pd(krf,rsq22)),crf));
 +            felec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_mul_pd(rinv22,rinvsq22),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
 +
 +            /* Inner loop uses 359 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
 +        gmx_mm256_update_1pot_pd(vvdwsum,kernel_data->energygrp_vdw+ggid);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 20 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3W3_VF,outeriter*20 + inneriter*359);
 +}
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecRFCut_VdwLJSw_GeomW3W3_F_avx_256_double
 + * Electrostatics interaction: ReactionField
 + * VdW interaction:            LennardJones
 + * Geometry:                   Water3-Water3
 + * Calculate force/pot:        Force
 + */
 +void
 +nb_kernel_ElecRFCut_VdwLJSw_GeomW3W3_F_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
 +    __m256d          jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
 +    int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
 +    __m256d          jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
 +    __m256d          dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
 +    __m256d          dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
 +    __m256d          dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m256d          rswitch,swV3,swV4,swV5,swF2,swF3,swF4,d,d2,sw,dsw;
 +    real             rswitch_scalar,d_scalar;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    krf              = _mm256_set1_pd(fr->ic->k_rf);
 +    krf2             = _mm256_set1_pd(fr->ic->k_rf*2.0);
 +    crf              = _mm256_set1_pd(fr->ic->c_rf);
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +    jq0              = _mm256_set1_pd(charge[inr+0]);
 +    jq1              = _mm256_set1_pd(charge[inr+1]);
 +    jq2              = _mm256_set1_pd(charge[inr+2]);
 +    vdwjidx0A        = 2*vdwtype[inr+0];
 +    qq00             = _mm256_mul_pd(iq0,jq0);
 +    c6_00            = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A]);
 +    c12_00           = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A+1]);
 +    qq01             = _mm256_mul_pd(iq0,jq1);
 +    qq02             = _mm256_mul_pd(iq0,jq2);
 +    qq10             = _mm256_mul_pd(iq1,jq0);
 +    qq11             = _mm256_mul_pd(iq1,jq1);
 +    qq12             = _mm256_mul_pd(iq1,jq2);
 +    qq20             = _mm256_mul_pd(iq2,jq0);
 +    qq21             = _mm256_mul_pd(iq2,jq1);
 +    qq22             = _mm256_mul_pd(iq2,jq2);
 +
 +    /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
 +    rcutoff_scalar   = fr->rcoulomb;
 +    rcutoff          = _mm256_set1_pd(rcutoff_scalar);
 +    rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);
 +
 +    rswitch_scalar   = fr->rvdw_switch;
 +    rswitch          = _mm256_set1_pd(rswitch_scalar);
 +    /* Setup switch parameters */
 +    d_scalar         = rcutoff_scalar-rswitch_scalar;
 +    d                = _mm256_set1_pd(d_scalar);
 +    swV3             = _mm256_set1_pd(-10.0/(d_scalar*d_scalar*d_scalar));
 +    swV4             = _mm256_set1_pd( 15.0/(d_scalar*d_scalar*d_scalar*d_scalar));
 +    swV5             = _mm256_set1_pd( -6.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
 +    swF2             = _mm256_set1_pd(-30.0/(d_scalar*d_scalar*d_scalar));
 +    swF3             = _mm256_set1_pd( 60.0/(d_scalar*d_scalar*d_scalar*d_scalar));
 +    swF4             = _mm256_set1_pd(-30.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                              &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx01             = _mm256_sub_pd(ix0,jx1);
 +            dy01             = _mm256_sub_pd(iy0,jy1);
 +            dz01             = _mm256_sub_pd(iz0,jz1);
 +            dx02             = _mm256_sub_pd(ix0,jx2);
 +            dy02             = _mm256_sub_pd(iy0,jy2);
 +            dz02             = _mm256_sub_pd(iz0,jz2);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
 +            rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv01           = gmx_mm256_invsqrt_pd(rsq01);
 +            rinv02           = gmx_mm256_invsqrt_pd(rsq02);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq01         = _mm256_mul_pd(rinv01,rinv01);
 +            rinvsq02         = _mm256_mul_pd(rinv02,rinv02);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_mul_pd(rinv00,rinvsq00),krf2));
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            d                = _mm256_sub_pd(r00,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            fvdw             = _mm256_sub_pd( _mm256_mul_pd(fvdw,sw) , _mm256_mul_pd(rinv00,_mm256_mul_pd(vvdw,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq01,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq01,_mm256_sub_pd(_mm256_mul_pd(rinv01,rinvsq01),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq01,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx01);
 +            ty               = _mm256_mul_pd(fscal,dy01);
 +            tz               = _mm256_mul_pd(fscal,dz01);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq02,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq02,_mm256_sub_pd(_mm256_mul_pd(rinv02,rinvsq02),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq02,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx02);
 +            ty               = _mm256_mul_pd(fscal,dy02);
 +            tz               = _mm256_mul_pd(fscal,dz02);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq10,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_mul_pd(rinv10,rinvsq10),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq11,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_mul_pd(rinv11,rinvsq11),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq12,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_mul_pd(rinv12,rinvsq12),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq20,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_mul_pd(rinv20,rinvsq20),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq21,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_mul_pd(rinv21,rinvsq21),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq22,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_mul_pd(rinv22,rinvsq22),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
 +
 +            /* Inner loop uses 301 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                              &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx01             = _mm256_sub_pd(ix0,jx1);
 +            dy01             = _mm256_sub_pd(iy0,jy1);
 +            dz01             = _mm256_sub_pd(iz0,jz1);
 +            dx02             = _mm256_sub_pd(ix0,jx2);
 +            dy02             = _mm256_sub_pd(iy0,jy2);
 +            dz02             = _mm256_sub_pd(iz0,jz2);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
 +            rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv01           = gmx_mm256_invsqrt_pd(rsq01);
 +            rinv02           = gmx_mm256_invsqrt_pd(rsq02);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq01         = _mm256_mul_pd(rinv01,rinv01);
 +            rinvsq02         = _mm256_mul_pd(rinv02,rinv02);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_mul_pd(rinv00,rinvsq00),krf2));
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            d                = _mm256_sub_pd(r00,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            fvdw             = _mm256_sub_pd( _mm256_mul_pd(fvdw,sw) , _mm256_mul_pd(rinv00,_mm256_mul_pd(vvdw,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq01,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq01,_mm256_sub_pd(_mm256_mul_pd(rinv01,rinvsq01),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq01,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx01);
 +            ty               = _mm256_mul_pd(fscal,dy01);
 +            tz               = _mm256_mul_pd(fscal,dz01);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq02,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq02,_mm256_sub_pd(_mm256_mul_pd(rinv02,rinvsq02),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq02,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx02);
 +            ty               = _mm256_mul_pd(fscal,dy02);
 +            tz               = _mm256_mul_pd(fscal,dz02);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq10,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_mul_pd(rinv10,rinvsq10),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq11,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_mul_pd(rinv11,rinvsq11),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq12,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_mul_pd(rinv12,rinvsq12),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq20,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_mul_pd(rinv20,rinvsq20),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq21,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_mul_pd(rinv21,rinvsq21),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq22,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_mul_pd(rinv22,rinvsq22),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
 +
 +            /* Inner loop uses 302 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 18 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3W3_F,outeriter*18 + inneriter*302);
 +}
index df3839a75910deda3a2708314c6437fab6de4fab,0000000000000000000000000000000000000000..e8b030407eabbc3539612cd410ca6679bc5aaf23
mode 100644,000000..100644
--- /dev/null
@@@ -1,1402 -1,0 +1,1402 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/*
 + * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2001-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the Free
 + * Software Foundation; either version 2 of the License, or (at your option) any
 + * later version.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website.
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecRFCut_VdwLJSw_GeomW4P1_VF_avx_256_double
 + * Electrostatics interaction: ReactionField
 + * VdW interaction:            LennardJones
 + * Geometry:                   Water4-Particle
 + * Calculate force/pot:        PotentialAndForce
 + */
 +void
 +nb_kernel_ElecRFCut_VdwLJSw_GeomW4P1_VF_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    real *           vdwioffsetptr3;
 +    __m256d          ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          dx30,dy30,dz30,rsq30,rinv30,rinvsq30,r30,qq30,c6_30,c12_30;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m256d          rswitch,swV3,swV4,swV5,swF2,swF3,swF4,d,d2,sw,dsw;
 +    real             rswitch_scalar,d_scalar;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    krf              = _mm256_set1_pd(fr->ic->k_rf);
 +    krf2             = _mm256_set1_pd(fr->ic->k_rf*2.0);
 +    crf              = _mm256_set1_pd(fr->ic->c_rf);
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    iq3              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+3]));
 +    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +    /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
 +    rcutoff_scalar   = fr->rcoulomb;
 +    rcutoff          = _mm256_set1_pd(rcutoff_scalar);
 +    rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);
 +
 +    rswitch_scalar   = fr->rvdw_switch;
 +    rswitch          = _mm256_set1_pd(rswitch_scalar);
 +    /* Setup switch parameters */
 +    d_scalar         = rcutoff_scalar-rswitch_scalar;
 +    d                = _mm256_set1_pd(d_scalar);
 +    swV3             = _mm256_set1_pd(-10.0/(d_scalar*d_scalar*d_scalar));
 +    swV4             = _mm256_set1_pd( 15.0/(d_scalar*d_scalar*d_scalar*d_scalar));
 +    swV5             = _mm256_set1_pd( -6.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
 +    swF2             = _mm256_set1_pd(-30.0/(d_scalar*d_scalar*d_scalar));
 +    swF3             = _mm256_set1_pd( 60.0/(d_scalar*d_scalar*d_scalar*d_scalar));
 +    swF4             = _mm256_set1_pd(-30.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_4rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +        fix3             = _mm256_setzero_pd();
 +        fiy3             = _mm256_setzero_pd();
 +        fiz3             = _mm256_setzero_pd();
 +
 +        /* Reset potential sums */
 +        velecsum         = _mm256_setzero_pd();
 +        vvdwsum          = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx30             = _mm256_sub_pd(ix3,jx0);
 +            dy30             = _mm256_sub_pd(iy3,jy0);
 +            dz30             = _mm256_sub_pd(iz3,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq30            = gmx_mm256_calc_rsq_pd(dx30,dy30,dz30);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv30           = gmx_mm256_invsqrt_pd(rsq30);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq30         = _mm256_mul_pd(rinv30,rinv30);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            d                = _mm256_sub_pd(r00,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            fvdw             = _mm256_sub_pd( _mm256_mul_pd(fvdw,sw) , _mm256_mul_pd(rinv00,_mm256_mul_pd(vvdw,dsw)) );
 +            vvdw             = _mm256_mul_pd(vvdw,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            vvdw             = _mm256_and_pd(vvdw,cutoff_mask);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = fvdw;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq10,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_add_pd(rinv10,_mm256_mul_pd(krf,rsq10)),crf));
 +            felec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_mul_pd(rinv10,rinvsq10),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq20,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_add_pd(rinv20,_mm256_mul_pd(krf,rsq20)),crf));
 +            felec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_mul_pd(rinv20,rinvsq20),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq30,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq30             = _mm256_mul_pd(iq3,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq30,_mm256_sub_pd(_mm256_add_pd(rinv30,_mm256_mul_pd(krf,rsq30)),crf));
 +            felec            = _mm256_mul_pd(qq30,_mm256_sub_pd(_mm256_mul_pd(rinv30,rinvsq30),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq30,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx30);
 +            ty               = _mm256_mul_pd(fscal,dy30);
 +            tz               = _mm256_mul_pd(fscal,dz30);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 170 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx30             = _mm256_sub_pd(ix3,jx0);
 +            dy30             = _mm256_sub_pd(iy3,jy0);
 +            dz30             = _mm256_sub_pd(iz3,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq30            = gmx_mm256_calc_rsq_pd(dx30,dy30,dz30);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv30           = gmx_mm256_invsqrt_pd(rsq30);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq30         = _mm256_mul_pd(rinv30,rinv30);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            d                = _mm256_sub_pd(r00,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            fvdw             = _mm256_sub_pd( _mm256_mul_pd(fvdw,sw) , _mm256_mul_pd(rinv00,_mm256_mul_pd(vvdw,dsw)) );
 +            vvdw             = _mm256_mul_pd(vvdw,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            vvdw             = _mm256_and_pd(vvdw,cutoff_mask);
 +            vvdw             = _mm256_andnot_pd(dummy_mask,vvdw);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = fvdw;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq10,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_add_pd(rinv10,_mm256_mul_pd(krf,rsq10)),crf));
 +            felec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_mul_pd(rinv10,rinvsq10),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq20,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_add_pd(rinv20,_mm256_mul_pd(krf,rsq20)),crf));
 +            felec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_mul_pd(rinv20,rinvsq20),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq30,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq30             = _mm256_mul_pd(iq3,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq30,_mm256_sub_pd(_mm256_add_pd(rinv30,_mm256_mul_pd(krf,rsq30)),crf));
 +            felec            = _mm256_mul_pd(qq30,_mm256_sub_pd(_mm256_mul_pd(rinv30,rinvsq30),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq30,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx30);
 +            ty               = _mm256_mul_pd(fscal,dy30);
 +            tz               = _mm256_mul_pd(fscal,dz30);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 171 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_4atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
 +        gmx_mm256_update_1pot_pd(vvdwsum,kernel_data->energygrp_vdw+ggid);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 26 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4_VF,outeriter*26 + inneriter*171);
 +}
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecRFCut_VdwLJSw_GeomW4P1_F_avx_256_double
 + * Electrostatics interaction: ReactionField
 + * VdW interaction:            LennardJones
 + * Geometry:                   Water4-Particle
 + * Calculate force/pot:        Force
 + */
 +void
 +nb_kernel_ElecRFCut_VdwLJSw_GeomW4P1_F_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    real *           vdwioffsetptr3;
 +    __m256d          ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          dx30,dy30,dz30,rsq30,rinv30,rinvsq30,r30,qq30,c6_30,c12_30;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m256d          rswitch,swV3,swV4,swV5,swF2,swF3,swF4,d,d2,sw,dsw;
 +    real             rswitch_scalar,d_scalar;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    krf              = _mm256_set1_pd(fr->ic->k_rf);
 +    krf2             = _mm256_set1_pd(fr->ic->k_rf*2.0);
 +    crf              = _mm256_set1_pd(fr->ic->c_rf);
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    iq3              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+3]));
 +    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +    /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
 +    rcutoff_scalar   = fr->rcoulomb;
 +    rcutoff          = _mm256_set1_pd(rcutoff_scalar);
 +    rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);
 +
 +    rswitch_scalar   = fr->rvdw_switch;
 +    rswitch          = _mm256_set1_pd(rswitch_scalar);
 +    /* Setup switch parameters */
 +    d_scalar         = rcutoff_scalar-rswitch_scalar;
 +    d                = _mm256_set1_pd(d_scalar);
 +    swV3             = _mm256_set1_pd(-10.0/(d_scalar*d_scalar*d_scalar));
 +    swV4             = _mm256_set1_pd( 15.0/(d_scalar*d_scalar*d_scalar*d_scalar));
 +    swV5             = _mm256_set1_pd( -6.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
 +    swF2             = _mm256_set1_pd(-30.0/(d_scalar*d_scalar*d_scalar));
 +    swF3             = _mm256_set1_pd( 60.0/(d_scalar*d_scalar*d_scalar*d_scalar));
 +    swF4             = _mm256_set1_pd(-30.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_4rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +        fix3             = _mm256_setzero_pd();
 +        fiy3             = _mm256_setzero_pd();
 +        fiz3             = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx30             = _mm256_sub_pd(ix3,jx0);
 +            dy30             = _mm256_sub_pd(iy3,jy0);
 +            dz30             = _mm256_sub_pd(iz3,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq30            = gmx_mm256_calc_rsq_pd(dx30,dy30,dz30);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv30           = gmx_mm256_invsqrt_pd(rsq30);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq30         = _mm256_mul_pd(rinv30,rinv30);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            d                = _mm256_sub_pd(r00,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            fvdw             = _mm256_sub_pd( _mm256_mul_pd(fvdw,sw) , _mm256_mul_pd(rinv00,_mm256_mul_pd(vvdw,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = fvdw;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq10,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_mul_pd(rinv10,rinvsq10),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq20,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_mul_pd(rinv20,rinvsq20),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq30,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq30             = _mm256_mul_pd(iq3,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq30,_mm256_sub_pd(_mm256_mul_pd(rinv30,rinvsq30),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq30,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx30);
 +            ty               = _mm256_mul_pd(fscal,dy30);
 +            tz               = _mm256_mul_pd(fscal,dz30);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 149 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx30             = _mm256_sub_pd(ix3,jx0);
 +            dy30             = _mm256_sub_pd(iy3,jy0);
 +            dz30             = _mm256_sub_pd(iz3,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq30            = gmx_mm256_calc_rsq_pd(dx30,dy30,dz30);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv30           = gmx_mm256_invsqrt_pd(rsq30);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq30         = _mm256_mul_pd(rinv30,rinv30);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            d                = _mm256_sub_pd(r00,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            fvdw             = _mm256_sub_pd( _mm256_mul_pd(fvdw,sw) , _mm256_mul_pd(rinv00,_mm256_mul_pd(vvdw,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = fvdw;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq10,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_mul_pd(rinv10,rinvsq10),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq20,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_mul_pd(rinv20,rinvsq20),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq30,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq30             = _mm256_mul_pd(iq3,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq30,_mm256_sub_pd(_mm256_mul_pd(rinv30,rinvsq30),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq30,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx30);
 +            ty               = _mm256_mul_pd(fscal,dy30);
 +            tz               = _mm256_mul_pd(fscal,dz30);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 150 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_4atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 24 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4_F,outeriter*24 + inneriter*150);
 +}
index f2e5252c67378fba928e098304a114e35669cedc,0000000000000000000000000000000000000000..9749bac10120c958af6749cc436f7dcfba239f3c
mode 100644,000000..100644
--- /dev/null
@@@ -1,2412 -1,0 +1,2412 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/*
 + * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2001-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the Free
 + * Software Foundation; either version 2 of the License, or (at your option) any
 + * later version.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website.
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecRFCut_VdwLJSw_GeomW4W4_VF_avx_256_double
 + * Electrostatics interaction: ReactionField
 + * VdW interaction:            LennardJones
 + * Geometry:                   Water4-Water4
 + * Calculate force/pot:        PotentialAndForce
 + */
 +void
 +nb_kernel_ElecRFCut_VdwLJSw_GeomW4W4_VF_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    real *           vdwioffsetptr3;
 +    __m256d          ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
 +    __m256d          jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
 +    int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
 +    __m256d          jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
 +    int              vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D;
 +    __m256d          jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
 +    __m256d          dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
 +    __m256d          dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
 +    __m256d          dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
 +    __m256d          dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
 +    __m256d          dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
 +    __m256d          dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
 +    __m256d          dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
 +    __m256d          dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m256d          rswitch,swV3,swV4,swV5,swF2,swF3,swF4,d,d2,sw,dsw;
 +    real             rswitch_scalar,d_scalar;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    krf              = _mm256_set1_pd(fr->ic->k_rf);
 +    krf2             = _mm256_set1_pd(fr->ic->k_rf*2.0);
 +    crf              = _mm256_set1_pd(fr->ic->c_rf);
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    iq3              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+3]));
 +    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +    jq1              = _mm256_set1_pd(charge[inr+1]);
 +    jq2              = _mm256_set1_pd(charge[inr+2]);
 +    jq3              = _mm256_set1_pd(charge[inr+3]);
 +    vdwjidx0A        = 2*vdwtype[inr+0];
 +    c6_00            = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A]);
 +    c12_00           = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A+1]);
 +    qq11             = _mm256_mul_pd(iq1,jq1);
 +    qq12             = _mm256_mul_pd(iq1,jq2);
 +    qq13             = _mm256_mul_pd(iq1,jq3);
 +    qq21             = _mm256_mul_pd(iq2,jq1);
 +    qq22             = _mm256_mul_pd(iq2,jq2);
 +    qq23             = _mm256_mul_pd(iq2,jq3);
 +    qq31             = _mm256_mul_pd(iq3,jq1);
 +    qq32             = _mm256_mul_pd(iq3,jq2);
 +    qq33             = _mm256_mul_pd(iq3,jq3);
 +
 +    /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
 +    rcutoff_scalar   = fr->rcoulomb;
 +    rcutoff          = _mm256_set1_pd(rcutoff_scalar);
 +    rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);
 +
 +    rswitch_scalar   = fr->rvdw_switch;
 +    rswitch          = _mm256_set1_pd(rswitch_scalar);
 +    /* Setup switch parameters */
 +    d_scalar         = rcutoff_scalar-rswitch_scalar;
 +    d                = _mm256_set1_pd(d_scalar);
 +    swV3             = _mm256_set1_pd(-10.0/(d_scalar*d_scalar*d_scalar));
 +    swV4             = _mm256_set1_pd( 15.0/(d_scalar*d_scalar*d_scalar*d_scalar));
 +    swV5             = _mm256_set1_pd( -6.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
 +    swF2             = _mm256_set1_pd(-30.0/(d_scalar*d_scalar*d_scalar));
 +    swF3             = _mm256_set1_pd( 60.0/(d_scalar*d_scalar*d_scalar*d_scalar));
 +    swF4             = _mm256_set1_pd(-30.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_4rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +        fix3             = _mm256_setzero_pd();
 +        fiy3             = _mm256_setzero_pd();
 +        fiz3             = _mm256_setzero_pd();
 +
 +        /* Reset potential sums */
 +        velecsum         = _mm256_setzero_pd();
 +        vvdwsum          = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_4rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
 +                                                 &jy2,&jz2,&jx3,&jy3,&jz3);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx13             = _mm256_sub_pd(ix1,jx3);
 +            dy13             = _mm256_sub_pd(iy1,jy3);
 +            dz13             = _mm256_sub_pd(iz1,jz3);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +            dx23             = _mm256_sub_pd(ix2,jx3);
 +            dy23             = _mm256_sub_pd(iy2,jy3);
 +            dz23             = _mm256_sub_pd(iz2,jz3);
 +            dx31             = _mm256_sub_pd(ix3,jx1);
 +            dy31             = _mm256_sub_pd(iy3,jy1);
 +            dz31             = _mm256_sub_pd(iz3,jz1);
 +            dx32             = _mm256_sub_pd(ix3,jx2);
 +            dy32             = _mm256_sub_pd(iy3,jy2);
 +            dz32             = _mm256_sub_pd(iz3,jz2);
 +            dx33             = _mm256_sub_pd(ix3,jx3);
 +            dy33             = _mm256_sub_pd(iy3,jy3);
 +            dz33             = _mm256_sub_pd(iz3,jz3);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq13            = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +            rsq23            = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
 +            rsq31            = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
 +            rsq32            = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
 +            rsq33            = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv13           = gmx_mm256_invsqrt_pd(rsq13);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +            rinv23           = gmx_mm256_invsqrt_pd(rsq23);
 +            rinv31           = gmx_mm256_invsqrt_pd(rsq31);
 +            rinv32           = gmx_mm256_invsqrt_pd(rsq32);
 +            rinv33           = gmx_mm256_invsqrt_pd(rsq33);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq13         = _mm256_mul_pd(rinv13,rinv13);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +            rinvsq23         = _mm256_mul_pd(rinv23,rinv23);
 +            rinvsq31         = _mm256_mul_pd(rinv31,rinv31);
 +            rinvsq32         = _mm256_mul_pd(rinv32,rinv32);
 +            rinvsq33         = _mm256_mul_pd(rinv33,rinv33);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +            fjx3             = _mm256_setzero_pd();
 +            fjy3             = _mm256_setzero_pd();
 +            fjz3             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            d                = _mm256_sub_pd(r00,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            fvdw             = _mm256_sub_pd( _mm256_mul_pd(fvdw,sw) , _mm256_mul_pd(rinv00,_mm256_mul_pd(vvdw,dsw)) );
 +            vvdw             = _mm256_mul_pd(vvdw,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            vvdw             = _mm256_and_pd(vvdw,cutoff_mask);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = fvdw;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq11,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_add_pd(rinv11,_mm256_mul_pd(krf,rsq11)),crf));
 +            felec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_mul_pd(rinv11,rinvsq11),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq12,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_add_pd(rinv12,_mm256_mul_pd(krf,rsq12)),crf));
 +            felec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_mul_pd(rinv12,rinvsq12),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq13,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq13,_mm256_sub_pd(_mm256_add_pd(rinv13,_mm256_mul_pd(krf,rsq13)),crf));
 +            felec            = _mm256_mul_pd(qq13,_mm256_sub_pd(_mm256_mul_pd(rinv13,rinvsq13),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq13,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx13);
 +            ty               = _mm256_mul_pd(fscal,dy13);
 +            tz               = _mm256_mul_pd(fscal,dz13);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq21,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_add_pd(rinv21,_mm256_mul_pd(krf,rsq21)),crf));
 +            felec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_mul_pd(rinv21,rinvsq21),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq22,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_add_pd(rinv22,_mm256_mul_pd(krf,rsq22)),crf));
 +            felec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_mul_pd(rinv22,rinvsq22),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq23,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq23,_mm256_sub_pd(_mm256_add_pd(rinv23,_mm256_mul_pd(krf,rsq23)),crf));
 +            felec            = _mm256_mul_pd(qq23,_mm256_sub_pd(_mm256_mul_pd(rinv23,rinvsq23),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq23,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx23);
 +            ty               = _mm256_mul_pd(fscal,dy23);
 +            tz               = _mm256_mul_pd(fscal,dz23);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq31,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq31,_mm256_sub_pd(_mm256_add_pd(rinv31,_mm256_mul_pd(krf,rsq31)),crf));
 +            felec            = _mm256_mul_pd(qq31,_mm256_sub_pd(_mm256_mul_pd(rinv31,rinvsq31),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq31,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx31);
 +            ty               = _mm256_mul_pd(fscal,dy31);
 +            tz               = _mm256_mul_pd(fscal,dz31);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq32,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq32,_mm256_sub_pd(_mm256_add_pd(rinv32,_mm256_mul_pd(krf,rsq32)),crf));
 +            felec            = _mm256_mul_pd(qq32,_mm256_sub_pd(_mm256_mul_pd(rinv32,rinvsq32),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq32,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx32);
 +            ty               = _mm256_mul_pd(fscal,dy32);
 +            tz               = _mm256_mul_pd(fscal,dz32);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq33,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq33,_mm256_sub_pd(_mm256_add_pd(rinv33,_mm256_mul_pd(krf,rsq33)),crf));
 +            felec            = _mm256_mul_pd(qq33,_mm256_sub_pd(_mm256_mul_pd(rinv33,rinvsq33),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq33,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx33);
 +            ty               = _mm256_mul_pd(fscal,dy33);
 +            tz               = _mm256_mul_pd(fscal,dz33);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            }
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_4rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
 +                                                      fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
 +
 +            /* Inner loop uses 386 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_4rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
 +                                                 &jy2,&jz2,&jx3,&jy3,&jz3);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx13             = _mm256_sub_pd(ix1,jx3);
 +            dy13             = _mm256_sub_pd(iy1,jy3);
 +            dz13             = _mm256_sub_pd(iz1,jz3);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +            dx23             = _mm256_sub_pd(ix2,jx3);
 +            dy23             = _mm256_sub_pd(iy2,jy3);
 +            dz23             = _mm256_sub_pd(iz2,jz3);
 +            dx31             = _mm256_sub_pd(ix3,jx1);
 +            dy31             = _mm256_sub_pd(iy3,jy1);
 +            dz31             = _mm256_sub_pd(iz3,jz1);
 +            dx32             = _mm256_sub_pd(ix3,jx2);
 +            dy32             = _mm256_sub_pd(iy3,jy2);
 +            dz32             = _mm256_sub_pd(iz3,jz2);
 +            dx33             = _mm256_sub_pd(ix3,jx3);
 +            dy33             = _mm256_sub_pd(iy3,jy3);
 +            dz33             = _mm256_sub_pd(iz3,jz3);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq13            = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +            rsq23            = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
 +            rsq31            = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
 +            rsq32            = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
 +            rsq33            = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv13           = gmx_mm256_invsqrt_pd(rsq13);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +            rinv23           = gmx_mm256_invsqrt_pd(rsq23);
 +            rinv31           = gmx_mm256_invsqrt_pd(rsq31);
 +            rinv32           = gmx_mm256_invsqrt_pd(rsq32);
 +            rinv33           = gmx_mm256_invsqrt_pd(rsq33);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq13         = _mm256_mul_pd(rinv13,rinv13);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +            rinvsq23         = _mm256_mul_pd(rinv23,rinv23);
 +            rinvsq31         = _mm256_mul_pd(rinv31,rinv31);
 +            rinvsq32         = _mm256_mul_pd(rinv32,rinv32);
 +            rinvsq33         = _mm256_mul_pd(rinv33,rinv33);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +            fjx3             = _mm256_setzero_pd();
 +            fjy3             = _mm256_setzero_pd();
 +            fjz3             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            d                = _mm256_sub_pd(r00,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            fvdw             = _mm256_sub_pd( _mm256_mul_pd(fvdw,sw) , _mm256_mul_pd(rinv00,_mm256_mul_pd(vvdw,dsw)) );
 +            vvdw             = _mm256_mul_pd(vvdw,sw);
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            vvdw             = _mm256_and_pd(vvdw,cutoff_mask);
 +            vvdw             = _mm256_andnot_pd(dummy_mask,vvdw);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = fvdw;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq11,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_add_pd(rinv11,_mm256_mul_pd(krf,rsq11)),crf));
 +            felec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_mul_pd(rinv11,rinvsq11),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq12,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_add_pd(rinv12,_mm256_mul_pd(krf,rsq12)),crf));
 +            felec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_mul_pd(rinv12,rinvsq12),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq13,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq13,_mm256_sub_pd(_mm256_add_pd(rinv13,_mm256_mul_pd(krf,rsq13)),crf));
 +            felec            = _mm256_mul_pd(qq13,_mm256_sub_pd(_mm256_mul_pd(rinv13,rinvsq13),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq13,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx13);
 +            ty               = _mm256_mul_pd(fscal,dy13);
 +            tz               = _mm256_mul_pd(fscal,dz13);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq21,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_add_pd(rinv21,_mm256_mul_pd(krf,rsq21)),crf));
 +            felec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_mul_pd(rinv21,rinvsq21),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq22,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_add_pd(rinv22,_mm256_mul_pd(krf,rsq22)),crf));
 +            felec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_mul_pd(rinv22,rinvsq22),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq23,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq23,_mm256_sub_pd(_mm256_add_pd(rinv23,_mm256_mul_pd(krf,rsq23)),crf));
 +            felec            = _mm256_mul_pd(qq23,_mm256_sub_pd(_mm256_mul_pd(rinv23,rinvsq23),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq23,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx23);
 +            ty               = _mm256_mul_pd(fscal,dy23);
 +            tz               = _mm256_mul_pd(fscal,dz23);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq31,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq31,_mm256_sub_pd(_mm256_add_pd(rinv31,_mm256_mul_pd(krf,rsq31)),crf));
 +            felec            = _mm256_mul_pd(qq31,_mm256_sub_pd(_mm256_mul_pd(rinv31,rinvsq31),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq31,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx31);
 +            ty               = _mm256_mul_pd(fscal,dy31);
 +            tz               = _mm256_mul_pd(fscal,dz31);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq32,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq32,_mm256_sub_pd(_mm256_add_pd(rinv32,_mm256_mul_pd(krf,rsq32)),crf));
 +            felec            = _mm256_mul_pd(qq32,_mm256_sub_pd(_mm256_mul_pd(rinv32,rinvsq32),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq32,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx32);
 +            ty               = _mm256_mul_pd(fscal,dy32);
 +            tz               = _mm256_mul_pd(fscal,dz32);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq33,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq33,_mm256_sub_pd(_mm256_add_pd(rinv33,_mm256_mul_pd(krf,rsq33)),crf));
 +            felec            = _mm256_mul_pd(qq33,_mm256_sub_pd(_mm256_mul_pd(rinv33,rinvsq33),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq33,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx33);
 +            ty               = _mm256_mul_pd(fscal,dy33);
 +            tz               = _mm256_mul_pd(fscal,dz33);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            }
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_4rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
 +                                                      fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
 +
 +            /* Inner loop uses 387 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_4atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
 +        gmx_mm256_update_1pot_pd(vvdwsum,kernel_data->energygrp_vdw+ggid);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 26 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4W4_VF,outeriter*26 + inneriter*387);
 +}
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecRFCut_VdwLJSw_GeomW4W4_F_avx_256_double
 + * Electrostatics interaction: ReactionField
 + * VdW interaction:            LennardJones
 + * Geometry:                   Water4-Water4
 + * Calculate force/pot:        Force
 + */
 +void
 +nb_kernel_ElecRFCut_VdwLJSw_GeomW4W4_F_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    real *           vdwioffsetptr3;
 +    __m256d          ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
 +    __m256d          jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
 +    int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
 +    __m256d          jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
 +    int              vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D;
 +    __m256d          jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
 +    __m256d          dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
 +    __m256d          dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
 +    __m256d          dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
 +    __m256d          dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
 +    __m256d          dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
 +    __m256d          dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
 +    __m256d          dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
 +    __m256d          dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m256d          rswitch,swV3,swV4,swV5,swF2,swF3,swF4,d,d2,sw,dsw;
 +    real             rswitch_scalar,d_scalar;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    krf              = _mm256_set1_pd(fr->ic->k_rf);
 +    krf2             = _mm256_set1_pd(fr->ic->k_rf*2.0);
 +    crf              = _mm256_set1_pd(fr->ic->c_rf);
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    iq3              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+3]));
 +    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +    jq1              = _mm256_set1_pd(charge[inr+1]);
 +    jq2              = _mm256_set1_pd(charge[inr+2]);
 +    jq3              = _mm256_set1_pd(charge[inr+3]);
 +    vdwjidx0A        = 2*vdwtype[inr+0];
 +    c6_00            = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A]);
 +    c12_00           = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A+1]);
 +    qq11             = _mm256_mul_pd(iq1,jq1);
 +    qq12             = _mm256_mul_pd(iq1,jq2);
 +    qq13             = _mm256_mul_pd(iq1,jq3);
 +    qq21             = _mm256_mul_pd(iq2,jq1);
 +    qq22             = _mm256_mul_pd(iq2,jq2);
 +    qq23             = _mm256_mul_pd(iq2,jq3);
 +    qq31             = _mm256_mul_pd(iq3,jq1);
 +    qq32             = _mm256_mul_pd(iq3,jq2);
 +    qq33             = _mm256_mul_pd(iq3,jq3);
 +
 +    /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
 +    rcutoff_scalar   = fr->rcoulomb;
 +    rcutoff          = _mm256_set1_pd(rcutoff_scalar);
 +    rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);
 +
 +    rswitch_scalar   = fr->rvdw_switch;
 +    rswitch          = _mm256_set1_pd(rswitch_scalar);
 +    /* Setup switch parameters */
 +    d_scalar         = rcutoff_scalar-rswitch_scalar;
 +    d                = _mm256_set1_pd(d_scalar);
 +    swV3             = _mm256_set1_pd(-10.0/(d_scalar*d_scalar*d_scalar));
 +    swV4             = _mm256_set1_pd( 15.0/(d_scalar*d_scalar*d_scalar*d_scalar));
 +    swV5             = _mm256_set1_pd( -6.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
 +    swF2             = _mm256_set1_pd(-30.0/(d_scalar*d_scalar*d_scalar));
 +    swF3             = _mm256_set1_pd( 60.0/(d_scalar*d_scalar*d_scalar*d_scalar));
 +    swF4             = _mm256_set1_pd(-30.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_4rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +        fix3             = _mm256_setzero_pd();
 +        fiy3             = _mm256_setzero_pd();
 +        fiz3             = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_4rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
 +                                                 &jy2,&jz2,&jx3,&jy3,&jz3);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx13             = _mm256_sub_pd(ix1,jx3);
 +            dy13             = _mm256_sub_pd(iy1,jy3);
 +            dz13             = _mm256_sub_pd(iz1,jz3);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +            dx23             = _mm256_sub_pd(ix2,jx3);
 +            dy23             = _mm256_sub_pd(iy2,jy3);
 +            dz23             = _mm256_sub_pd(iz2,jz3);
 +            dx31             = _mm256_sub_pd(ix3,jx1);
 +            dy31             = _mm256_sub_pd(iy3,jy1);
 +            dz31             = _mm256_sub_pd(iz3,jz1);
 +            dx32             = _mm256_sub_pd(ix3,jx2);
 +            dy32             = _mm256_sub_pd(iy3,jy2);
 +            dz32             = _mm256_sub_pd(iz3,jz2);
 +            dx33             = _mm256_sub_pd(ix3,jx3);
 +            dy33             = _mm256_sub_pd(iy3,jy3);
 +            dz33             = _mm256_sub_pd(iz3,jz3);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq13            = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +            rsq23            = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
 +            rsq31            = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
 +            rsq32            = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
 +            rsq33            = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv13           = gmx_mm256_invsqrt_pd(rsq13);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +            rinv23           = gmx_mm256_invsqrt_pd(rsq23);
 +            rinv31           = gmx_mm256_invsqrt_pd(rsq31);
 +            rinv32           = gmx_mm256_invsqrt_pd(rsq32);
 +            rinv33           = gmx_mm256_invsqrt_pd(rsq33);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq13         = _mm256_mul_pd(rinv13,rinv13);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +            rinvsq23         = _mm256_mul_pd(rinv23,rinv23);
 +            rinvsq31         = _mm256_mul_pd(rinv31,rinv31);
 +            rinvsq32         = _mm256_mul_pd(rinv32,rinv32);
 +            rinvsq33         = _mm256_mul_pd(rinv33,rinv33);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +            fjx3             = _mm256_setzero_pd();
 +            fjy3             = _mm256_setzero_pd();
 +            fjz3             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            d                = _mm256_sub_pd(r00,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            fvdw             = _mm256_sub_pd( _mm256_mul_pd(fvdw,sw) , _mm256_mul_pd(rinv00,_mm256_mul_pd(vvdw,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = fvdw;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq11,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_mul_pd(rinv11,rinvsq11),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq12,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_mul_pd(rinv12,rinvsq12),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq13,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq13,_mm256_sub_pd(_mm256_mul_pd(rinv13,rinvsq13),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq13,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx13);
 +            ty               = _mm256_mul_pd(fscal,dy13);
 +            tz               = _mm256_mul_pd(fscal,dz13);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq21,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_mul_pd(rinv21,rinvsq21),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq22,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_mul_pd(rinv22,rinvsq22),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq23,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq23,_mm256_sub_pd(_mm256_mul_pd(rinv23,rinvsq23),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq23,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx23);
 +            ty               = _mm256_mul_pd(fscal,dy23);
 +            tz               = _mm256_mul_pd(fscal,dz23);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq31,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq31,_mm256_sub_pd(_mm256_mul_pd(rinv31,rinvsq31),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq31,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx31);
 +            ty               = _mm256_mul_pd(fscal,dy31);
 +            tz               = _mm256_mul_pd(fscal,dz31);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq32,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq32,_mm256_sub_pd(_mm256_mul_pd(rinv32,rinvsq32),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq32,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx32);
 +            ty               = _mm256_mul_pd(fscal,dy32);
 +            tz               = _mm256_mul_pd(fscal,dz32);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq33,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq33,_mm256_sub_pd(_mm256_mul_pd(rinv33,rinvsq33),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq33,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx33);
 +            ty               = _mm256_mul_pd(fscal,dy33);
 +            tz               = _mm256_mul_pd(fscal,dz33);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            }
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_4rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
 +                                                      fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
 +
 +            /* Inner loop uses 329 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_4rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
 +                                                 &jy2,&jz2,&jx3,&jy3,&jz3);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx13             = _mm256_sub_pd(ix1,jx3);
 +            dy13             = _mm256_sub_pd(iy1,jy3);
 +            dz13             = _mm256_sub_pd(iz1,jz3);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +            dx23             = _mm256_sub_pd(ix2,jx3);
 +            dy23             = _mm256_sub_pd(iy2,jy3);
 +            dz23             = _mm256_sub_pd(iz2,jz3);
 +            dx31             = _mm256_sub_pd(ix3,jx1);
 +            dy31             = _mm256_sub_pd(iy3,jy1);
 +            dz31             = _mm256_sub_pd(iz3,jz1);
 +            dx32             = _mm256_sub_pd(ix3,jx2);
 +            dy32             = _mm256_sub_pd(iy3,jy2);
 +            dz32             = _mm256_sub_pd(iz3,jz2);
 +            dx33             = _mm256_sub_pd(ix3,jx3);
 +            dy33             = _mm256_sub_pd(iy3,jy3);
 +            dz33             = _mm256_sub_pd(iz3,jz3);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq13            = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +            rsq23            = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
 +            rsq31            = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
 +            rsq32            = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
 +            rsq33            = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv13           = gmx_mm256_invsqrt_pd(rsq13);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +            rinv23           = gmx_mm256_invsqrt_pd(rsq23);
 +            rinv31           = gmx_mm256_invsqrt_pd(rsq31);
 +            rinv32           = gmx_mm256_invsqrt_pd(rsq32);
 +            rinv33           = gmx_mm256_invsqrt_pd(rsq33);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq13         = _mm256_mul_pd(rinv13,rinv13);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +            rinvsq23         = _mm256_mul_pd(rinv23,rinv23);
 +            rinvsq31         = _mm256_mul_pd(rinv31,rinv31);
 +            rinvsq32         = _mm256_mul_pd(rinv32,rinv32);
 +            rinvsq33         = _mm256_mul_pd(rinv33,rinv33);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +            fjx3             = _mm256_setzero_pd();
 +            fjy3             = _mm256_setzero_pd();
 +            fjz3             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            d                = _mm256_sub_pd(r00,rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +
 +            /* Evaluate switch function */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            fvdw             = _mm256_sub_pd( _mm256_mul_pd(fvdw,sw) , _mm256_mul_pd(rinv00,_mm256_mul_pd(vvdw,dsw)) );
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = fvdw;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq11,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_mul_pd(rinv11,rinvsq11),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq12,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_mul_pd(rinv12,rinvsq12),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq13,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq13,_mm256_sub_pd(_mm256_mul_pd(rinv13,rinvsq13),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq13,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx13);
 +            ty               = _mm256_mul_pd(fscal,dy13);
 +            tz               = _mm256_mul_pd(fscal,dz13);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq21,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_mul_pd(rinv21,rinvsq21),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq22,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_mul_pd(rinv22,rinvsq22),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq23,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq23,_mm256_sub_pd(_mm256_mul_pd(rinv23,rinvsq23),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq23,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx23);
 +            ty               = _mm256_mul_pd(fscal,dy23);
 +            tz               = _mm256_mul_pd(fscal,dz23);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq31,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq31,_mm256_sub_pd(_mm256_mul_pd(rinv31,rinvsq31),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq31,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx31);
 +            ty               = _mm256_mul_pd(fscal,dy31);
 +            tz               = _mm256_mul_pd(fscal,dz31);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq32,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq32,_mm256_sub_pd(_mm256_mul_pd(rinv32,rinvsq32),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq32,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx32);
 +            ty               = _mm256_mul_pd(fscal,dy32);
 +            tz               = _mm256_mul_pd(fscal,dz32);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq33,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq33,_mm256_sub_pd(_mm256_mul_pd(rinv33,rinvsq33),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq33,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx33);
 +            ty               = _mm256_mul_pd(fscal,dy33);
 +            tz               = _mm256_mul_pd(fscal,dz33);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            }
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_4rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
 +                                                      fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
 +
 +            /* Inner loop uses 330 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_4atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 24 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4W4_F,outeriter*24 + inneriter*330);
 +}
index d45bf31e96e7f669b78ae0e3111feee8f8ce2910,0000000000000000000000000000000000000000..52ae8ab6b86bea9a2a6321c81eb82c3b8d509ee8
mode 100644,000000..100644
--- /dev/null
@@@ -1,633 -1,0 +1,633 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/*
 + * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2001-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the Free
 + * Software Foundation; either version 2 of the License, or (at your option) any
 + * later version.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website.
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecRFCut_VdwNone_GeomP1P1_VF_avx_256_double
 + * Electrostatics interaction: ReactionField
 + * VdW interaction:            None
 + * Geometry:                   Particle-Particle
 + * Calculate force/pot:        PotentialAndForce
 + */
 +void
 +nb_kernel_ElecRFCut_VdwNone_GeomP1P1_VF_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    krf              = _mm256_set1_pd(fr->ic->k_rf);
 +    krf2             = _mm256_set1_pd(fr->ic->k_rf*2.0);
 +    crf              = _mm256_set1_pd(fr->ic->c_rf);
 +
 +    /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
 +    rcutoff_scalar   = fr->rcoulomb;
 +    rcutoff          = _mm256_set1_pd(rcutoff_scalar);
 +    rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_1rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +
 +        /* Load parameters for i particles */
 +        iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +
 +        /* Reset potential sums */
 +        velecsum         = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_add_pd(rinv00,_mm256_mul_pd(krf,rsq00)),crf));
 +            felec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_mul_pd(rinv00,rinvsq00),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
 +
 +            }
 +
 +            /* Inner loop uses 36 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_add_pd(rinv00,_mm256_mul_pd(krf,rsq00)),crf));
 +            felec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_mul_pd(rinv00,rinvsq00),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
 +
 +            }
 +
 +            /* Inner loop uses 36 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_1atom_swizzle_pd(fix0,fiy0,fiz0,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 8 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VF,outeriter*8 + inneriter*36);
 +}
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecRFCut_VdwNone_GeomP1P1_F_avx_256_double
 + * Electrostatics interaction: ReactionField
 + * VdW interaction:            None
 + * Geometry:                   Particle-Particle
 + * Calculate force/pot:        Force
 + */
 +/* NOTE(review): generated code (GROMACS avx_256_double kernel generator) -
 + * presumably fixes belong in the generator templates, not here; confirm.
 + * Force-only variant: only felec is computed; no potential (velec/velecsum)
 + * accumulation, unlike the VF kernel above. Processes j atoms in SIMD groups
 + * of 4 (256-bit double registers). */
 +void
 +nb_kernel_ElecRFCut_VdwNone_GeomP1P1_F_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    /* scratch absorbs force decrements aimed at padded (dummy) j entries in the tail loop */
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    /* NOTE(review): signbit/one/two (and velec/velecsum) are emitted unconditionally
 +     * by the generator and appear unused in this force-only kernel. */
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    krf              = _mm256_set1_pd(fr->ic->k_rf);
 +    krf2             = _mm256_set1_pd(fr->ic->k_rf*2.0);
 +    crf              = _mm256_set1_pd(fr->ic->c_rf);
 +
 +    /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
 +    rcutoff_scalar   = fr->rcoulomb;
 +    rcutoff          = _mm256_set1_pd(rcutoff_scalar);
 +    rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_1rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +
 +        /* Load parameters for i particles */
 +        iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +
 +        /* Start inner kernel loop: full groups of 4 j atoms; the final,
 +         * possibly padded group (negative jjnr entries) is handled below. */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Skip the whole group if no pair is inside the cutoff */
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_mul_pd(rinv00,rinvsq00),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            /* Zero force contributions from pairs beyond the cutoff */
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
 +
 +            }
 +
 +            /* Inner loop uses 30 flops */
 +        }
 +
 +        /* Tail: last group of up to 4 j atoms; negative jjnr entries are padding */
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
 +            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            /* Widen the 4x32-bit sign mask to a 4x64-bit double-lane mask */
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            /* Clamp dummy indices to 0 so the loads below stay in bounds */
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_mul_pd(rinv00,rinvsq00),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Zero force contributions of the dummy (padding) lanes */
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            /* Dummy lanes write into scratch instead of the real force array */
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
 +
 +            }
 +
 +            /* Inner loop uses 30 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_1atom_swizzle_pd(fix0,fiy0,fiz0,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 7 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_F,outeriter*7 + inneriter*30);
 +}
index 1e73352fc188467def2d3a6eeae1d71991fede8f,0000000000000000000000000000000000000000..afd12cd60bf3bab62364fd6fd4687ea9cb304057
mode 100644,000000..100644
--- /dev/null
@@@ -1,1059 -1,0 +1,1059 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/*
 + * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2001-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the Free
 + * Software Foundation; either version 2 of the License, or (at your option) any
 + * later version.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website.
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecRFCut_VdwNone_GeomW3P1_VF_avx_256_double
 + * Electrostatics interaction: ReactionField
 + * VdW interaction:            None
 + * Geometry:                   Water3-Particle
 + * Calculate force/pot:        PotentialAndForce
 + */
 +void
 +nb_kernel_ElecRFCut_VdwNone_GeomW3P1_VF_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    krf              = _mm256_set1_pd(fr->ic->k_rf);
 +    krf2             = _mm256_set1_pd(fr->ic->k_rf*2.0);
 +    crf              = _mm256_set1_pd(fr->ic->c_rf);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +
 +    /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
 +    rcutoff_scalar   = fr->rcoulomb;
 +    rcutoff          = _mm256_set1_pd(rcutoff_scalar);
 +    rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +
 +        /* Reset potential sums */
 +        velecsum         = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_add_pd(rinv00,_mm256_mul_pd(krf,rsq00)),crf));
 +            felec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_mul_pd(rinv00,rinvsq00),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq10,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_add_pd(rinv10,_mm256_mul_pd(krf,rsq10)),crf));
 +            felec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_mul_pd(rinv10,rinvsq10),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq20,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_add_pd(rinv20,_mm256_mul_pd(krf,rsq20)),crf));
 +            felec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_mul_pd(rinv20,rinvsq20),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 111 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_add_pd(rinv00,_mm256_mul_pd(krf,rsq00)),crf));
 +            felec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_mul_pd(rinv00,rinvsq00),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq10,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_add_pd(rinv10,_mm256_mul_pd(krf,rsq10)),crf));
 +            felec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_mul_pd(rinv10,rinvsq10),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq20,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_add_pd(rinv20,_mm256_mul_pd(krf,rsq20)),crf));
 +            felec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_mul_pd(rinv20,rinvsq20),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 111 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 19 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W3_VF,outeriter*19 + inneriter*111);
 +}
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecRFCut_VdwNone_GeomW3P1_F_avx_256_double
 + * Electrostatics interaction: ReactionField
 + * VdW interaction:            None
 + * Geometry:                   Water3-Particle
 + * Calculate force/pot:        Force
 + */
 +void
 +nb_kernel_ElecRFCut_VdwNone_GeomW3P1_F_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    krf              = _mm256_set1_pd(fr->ic->k_rf);
 +    krf2             = _mm256_set1_pd(fr->ic->k_rf*2.0);
 +    crf              = _mm256_set1_pd(fr->ic->c_rf);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +
 +    /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
 +    rcutoff_scalar   = fr->rcoulomb;
 +    rcutoff          = _mm256_set1_pd(rcutoff_scalar);
 +    rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_mul_pd(rinv00,rinvsq00),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq10,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_mul_pd(rinv10,rinvsq10),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq20,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_mul_pd(rinv20,rinvsq20),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 93 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_mul_pd(rinv00,rinvsq00),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq10,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_mul_pd(rinv10,rinvsq10),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq20,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_mul_pd(rinv20,rinvsq20),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 93 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 18 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W3_F,outeriter*18 + inneriter*93);
 +}
index cf489d8cc6c8d0e9095a5dd011480c53b1fcc82f,0000000000000000000000000000000000000000..c282de4780fb6c911e3856cae59bd9c32a2b948d
mode 100644,000000..100644
--- /dev/null
@@@ -1,2083 -1,0 +1,2083 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/*
 + * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2001-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the Free
 + * Software Foundation; either version 2 of the License, or (at your option) any
 + * later version.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website.
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecRFCut_VdwNone_GeomW3W3_VF_avx_256_double
 + * Electrostatics interaction: ReactionField
 + * VdW interaction:            None
 + * Geometry:                   Water3-Water3
 + * Calculate force/pot:        PotentialAndForce
 + */
 +void
 +nb_kernel_ElecRFCut_VdwNone_GeomW3W3_VF_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
 +    __m256d          jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
 +    int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
 +    __m256d          jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
 +    __m256d          dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
 +    __m256d          dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
 +    __m256d          dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    krf              = _mm256_set1_pd(fr->ic->k_rf);
 +    krf2             = _mm256_set1_pd(fr->ic->k_rf*2.0);
 +    crf              = _mm256_set1_pd(fr->ic->c_rf);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +
 +    jq0              = _mm256_set1_pd(charge[inr+0]);
 +    jq1              = _mm256_set1_pd(charge[inr+1]);
 +    jq2              = _mm256_set1_pd(charge[inr+2]);
 +    qq00             = _mm256_mul_pd(iq0,jq0);
 +    qq01             = _mm256_mul_pd(iq0,jq1);
 +    qq02             = _mm256_mul_pd(iq0,jq2);
 +    qq10             = _mm256_mul_pd(iq1,jq0);
 +    qq11             = _mm256_mul_pd(iq1,jq1);
 +    qq12             = _mm256_mul_pd(iq1,jq2);
 +    qq20             = _mm256_mul_pd(iq2,jq0);
 +    qq21             = _mm256_mul_pd(iq2,jq1);
 +    qq22             = _mm256_mul_pd(iq2,jq2);
 +
 +    /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
 +    rcutoff_scalar   = fr->rcoulomb;
 +    rcutoff          = _mm256_set1_pd(rcutoff_scalar);
 +    rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +
 +        /* Reset potential sums */
 +        velecsum         = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                              &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx01             = _mm256_sub_pd(ix0,jx1);
 +            dy01             = _mm256_sub_pd(iy0,jy1);
 +            dz01             = _mm256_sub_pd(iz0,jz1);
 +            dx02             = _mm256_sub_pd(ix0,jx2);
 +            dy02             = _mm256_sub_pd(iy0,jy2);
 +            dz02             = _mm256_sub_pd(iz0,jz2);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
 +            rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv01           = gmx_mm256_invsqrt_pd(rsq01);
 +            rinv02           = gmx_mm256_invsqrt_pd(rsq02);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq01         = _mm256_mul_pd(rinv01,rinv01);
 +            rinvsq02         = _mm256_mul_pd(rinv02,rinv02);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_add_pd(rinv00,_mm256_mul_pd(krf,rsq00)),crf));
 +            felec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_mul_pd(rinv00,rinvsq00),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq01,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq01,_mm256_sub_pd(_mm256_add_pd(rinv01,_mm256_mul_pd(krf,rsq01)),crf));
 +            felec            = _mm256_mul_pd(qq01,_mm256_sub_pd(_mm256_mul_pd(rinv01,rinvsq01),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq01,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx01);
 +            ty               = _mm256_mul_pd(fscal,dy01);
 +            tz               = _mm256_mul_pd(fscal,dz01);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq02,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq02,_mm256_sub_pd(_mm256_add_pd(rinv02,_mm256_mul_pd(krf,rsq02)),crf));
 +            felec            = _mm256_mul_pd(qq02,_mm256_sub_pd(_mm256_mul_pd(rinv02,rinvsq02),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq02,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx02);
 +            ty               = _mm256_mul_pd(fscal,dy02);
 +            tz               = _mm256_mul_pd(fscal,dz02);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq10,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_add_pd(rinv10,_mm256_mul_pd(krf,rsq10)),crf));
 +            felec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_mul_pd(rinv10,rinvsq10),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq11,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_add_pd(rinv11,_mm256_mul_pd(krf,rsq11)),crf));
 +            felec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_mul_pd(rinv11,rinvsq11),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq12,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_add_pd(rinv12,_mm256_mul_pd(krf,rsq12)),crf));
 +            felec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_mul_pd(rinv12,rinvsq12),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq20,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_add_pd(rinv20,_mm256_mul_pd(krf,rsq20)),crf));
 +            felec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_mul_pd(rinv20,rinvsq20),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq21,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_add_pd(rinv21,_mm256_mul_pd(krf,rsq21)),crf));
 +            felec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_mul_pd(rinv21,rinvsq21),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq22,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_add_pd(rinv22,_mm256_mul_pd(krf,rsq22)),crf));
 +            felec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_mul_pd(rinv22,rinvsq22),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
 +
 +            /* Inner loop uses 324 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                              &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx01             = _mm256_sub_pd(ix0,jx1);
 +            dy01             = _mm256_sub_pd(iy0,jy1);
 +            dz01             = _mm256_sub_pd(iz0,jz1);
 +            dx02             = _mm256_sub_pd(ix0,jx2);
 +            dy02             = _mm256_sub_pd(iy0,jy2);
 +            dz02             = _mm256_sub_pd(iz0,jz2);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
 +            rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv01           = gmx_mm256_invsqrt_pd(rsq01);
 +            rinv02           = gmx_mm256_invsqrt_pd(rsq02);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq01         = _mm256_mul_pd(rinv01,rinv01);
 +            rinvsq02         = _mm256_mul_pd(rinv02,rinv02);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_add_pd(rinv00,_mm256_mul_pd(krf,rsq00)),crf));
 +            felec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_mul_pd(rinv00,rinvsq00),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq01,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq01,_mm256_sub_pd(_mm256_add_pd(rinv01,_mm256_mul_pd(krf,rsq01)),crf));
 +            felec            = _mm256_mul_pd(qq01,_mm256_sub_pd(_mm256_mul_pd(rinv01,rinvsq01),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq01,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx01);
 +            ty               = _mm256_mul_pd(fscal,dy01);
 +            tz               = _mm256_mul_pd(fscal,dz01);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq02,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq02,_mm256_sub_pd(_mm256_add_pd(rinv02,_mm256_mul_pd(krf,rsq02)),crf));
 +            felec            = _mm256_mul_pd(qq02,_mm256_sub_pd(_mm256_mul_pd(rinv02,rinvsq02),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq02,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx02);
 +            ty               = _mm256_mul_pd(fscal,dy02);
 +            tz               = _mm256_mul_pd(fscal,dz02);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq10,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_add_pd(rinv10,_mm256_mul_pd(krf,rsq10)),crf));
 +            felec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_mul_pd(rinv10,rinvsq10),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq11,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_add_pd(rinv11,_mm256_mul_pd(krf,rsq11)),crf));
 +            felec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_mul_pd(rinv11,rinvsq11),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq12,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_add_pd(rinv12,_mm256_mul_pd(krf,rsq12)),crf));
 +            felec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_mul_pd(rinv12,rinvsq12),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq20,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_add_pd(rinv20,_mm256_mul_pd(krf,rsq20)),crf));
 +            felec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_mul_pd(rinv20,rinvsq20),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq21,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_add_pd(rinv21,_mm256_mul_pd(krf,rsq21)),crf));
 +            felec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_mul_pd(rinv21,rinvsq21),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq22,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_add_pd(rinv22,_mm256_mul_pd(krf,rsq22)),crf));
 +            felec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_mul_pd(rinv22,rinvsq22),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
 +
 +            /* Inner loop uses 324 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 19 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W3W3_VF,outeriter*19 + inneriter*324);
 +}
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecRFCut_VdwNone_GeomW3W3_F_avx_256_double
 + * Electrostatics interaction: ReactionField
 + * VdW interaction:            None
 + * Geometry:                   Water3-Water3
 + * Calculate force/pot:        Force
 + */
 +void
 +nb_kernel_ElecRFCut_VdwNone_GeomW3W3_F_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
 +    __m256d          jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
 +    int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
 +    __m256d          jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
 +    __m256d          dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
 +    __m256d          dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
 +    __m256d          dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    krf              = _mm256_set1_pd(fr->ic->k_rf);
 +    krf2             = _mm256_set1_pd(fr->ic->k_rf*2.0);
 +    crf              = _mm256_set1_pd(fr->ic->c_rf);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +
 +    jq0              = _mm256_set1_pd(charge[inr+0]);
 +    jq1              = _mm256_set1_pd(charge[inr+1]);
 +    jq2              = _mm256_set1_pd(charge[inr+2]);
 +    qq00             = _mm256_mul_pd(iq0,jq0);
 +    qq01             = _mm256_mul_pd(iq0,jq1);
 +    qq02             = _mm256_mul_pd(iq0,jq2);
 +    qq10             = _mm256_mul_pd(iq1,jq0);
 +    qq11             = _mm256_mul_pd(iq1,jq1);
 +    qq12             = _mm256_mul_pd(iq1,jq2);
 +    qq20             = _mm256_mul_pd(iq2,jq0);
 +    qq21             = _mm256_mul_pd(iq2,jq1);
 +    qq22             = _mm256_mul_pd(iq2,jq2);
 +
 +    /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
 +    rcutoff_scalar   = fr->rcoulomb;
 +    rcutoff          = _mm256_set1_pd(rcutoff_scalar);
 +    rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                              &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx01             = _mm256_sub_pd(ix0,jx1);
 +            dy01             = _mm256_sub_pd(iy0,jy1);
 +            dz01             = _mm256_sub_pd(iz0,jz1);
 +            dx02             = _mm256_sub_pd(ix0,jx2);
 +            dy02             = _mm256_sub_pd(iy0,jy2);
 +            dz02             = _mm256_sub_pd(iz0,jz2);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
 +            rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv01           = gmx_mm256_invsqrt_pd(rsq01);
 +            rinv02           = gmx_mm256_invsqrt_pd(rsq02);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq01         = _mm256_mul_pd(rinv01,rinv01);
 +            rinvsq02         = _mm256_mul_pd(rinv02,rinv02);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_mul_pd(rinv00,rinvsq00),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq01,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq01,_mm256_sub_pd(_mm256_mul_pd(rinv01,rinvsq01),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq01,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx01);
 +            ty               = _mm256_mul_pd(fscal,dy01);
 +            tz               = _mm256_mul_pd(fscal,dz01);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq02,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq02,_mm256_sub_pd(_mm256_mul_pd(rinv02,rinvsq02),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq02,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx02);
 +            ty               = _mm256_mul_pd(fscal,dy02);
 +            tz               = _mm256_mul_pd(fscal,dz02);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq10,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_mul_pd(rinv10,rinvsq10),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq11,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_mul_pd(rinv11,rinvsq11),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq12,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_mul_pd(rinv12,rinvsq12),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq20,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_mul_pd(rinv20,rinvsq20),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq21,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_mul_pd(rinv21,rinvsq21),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq22,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_mul_pd(rinv22,rinvsq22),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
 +
 +            /* Inner loop uses 270 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                              &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx01             = _mm256_sub_pd(ix0,jx1);
 +            dy01             = _mm256_sub_pd(iy0,jy1);
 +            dz01             = _mm256_sub_pd(iz0,jz1);
 +            dx02             = _mm256_sub_pd(ix0,jx2);
 +            dy02             = _mm256_sub_pd(iy0,jy2);
 +            dz02             = _mm256_sub_pd(iz0,jz2);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
 +            rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv01           = gmx_mm256_invsqrt_pd(rsq01);
 +            rinv02           = gmx_mm256_invsqrt_pd(rsq02);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq01         = _mm256_mul_pd(rinv01,rinv01);
 +            rinvsq02         = _mm256_mul_pd(rinv02,rinv02);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq00,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_mul_pd(rinv00,rinvsq00),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq01,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq01,_mm256_sub_pd(_mm256_mul_pd(rinv01,rinvsq01),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq01,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx01);
 +            ty               = _mm256_mul_pd(fscal,dy01);
 +            tz               = _mm256_mul_pd(fscal,dz01);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq02,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq02,_mm256_sub_pd(_mm256_mul_pd(rinv02,rinvsq02),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq02,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx02);
 +            ty               = _mm256_mul_pd(fscal,dy02);
 +            tz               = _mm256_mul_pd(fscal,dz02);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq10,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_mul_pd(rinv10,rinvsq10),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq11,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_mul_pd(rinv11,rinvsq11),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq12,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_mul_pd(rinv12,rinvsq12),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq20,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_mul_pd(rinv20,rinvsq20),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq21,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_mul_pd(rinv21,rinvsq21),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq22,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_mul_pd(rinv22,rinvsq22),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
 +
 +            /* Inner loop uses 270 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 18 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W3W3_F,outeriter*18 + inneriter*270);
 +}
index 206da669ba7837b919347371e85f0550ae4c7a89,0000000000000000000000000000000000000000..ec6b9558977b80ab36a5bd652b0266f323dea2cf
mode 100644,000000..100644
--- /dev/null
@@@ -1,1059 -1,0 +1,1059 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/*
 + * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2001-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the Free
 + * Software Foundation; either version 2 of the License, or (at your option) any
 + * later version.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website.
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecRFCut_VdwNone_GeomW4P1_VF_avx_256_double
 + * Electrostatics interaction: ReactionField
 + * VdW interaction:            None
 + * Geometry:                   Water4-Particle
 + * Calculate force/pot:        PotentialAndForce
 + */
 +void
 +nb_kernel_ElecRFCut_VdwNone_GeomW4P1_VF_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    real *           vdwioffsetptr3;
 +    __m256d          ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          dx30,dy30,dz30,rsq30,rinv30,rinvsq30,r30,qq30,c6_30,c12_30;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    krf              = _mm256_set1_pd(fr->ic->k_rf);
 +    krf2             = _mm256_set1_pd(fr->ic->k_rf*2.0);
 +    crf              = _mm256_set1_pd(fr->ic->c_rf);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    iq3              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+3]));
 +
 +    /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
 +    rcutoff_scalar   = fr->rcoulomb;
 +    rcutoff          = _mm256_set1_pd(rcutoff_scalar);
 +    rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset+DIM,
 +                                                    &ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
 +
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +        fix3             = _mm256_setzero_pd();
 +        fiy3             = _mm256_setzero_pd();
 +        fiz3             = _mm256_setzero_pd();
 +
 +        /* Reset potential sums */
 +        velecsum         = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx30             = _mm256_sub_pd(ix3,jx0);
 +            dy30             = _mm256_sub_pd(iy3,jy0);
 +            dz30             = _mm256_sub_pd(iz3,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq30            = gmx_mm256_calc_rsq_pd(dx30,dy30,dz30);
 +
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv30           = gmx_mm256_invsqrt_pd(rsq30);
 +
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq30         = _mm256_mul_pd(rinv30,rinv30);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq10,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_add_pd(rinv10,_mm256_mul_pd(krf,rsq10)),crf));
 +            felec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_mul_pd(rinv10,rinvsq10),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq20,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_add_pd(rinv20,_mm256_mul_pd(krf,rsq20)),crf));
 +            felec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_mul_pd(rinv20,rinvsq20),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq30,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq30             = _mm256_mul_pd(iq3,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq30,_mm256_sub_pd(_mm256_add_pd(rinv30,_mm256_mul_pd(krf,rsq30)),crf));
 +            felec            = _mm256_mul_pd(qq30,_mm256_sub_pd(_mm256_mul_pd(rinv30,rinvsq30),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq30,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx30);
 +            ty               = _mm256_mul_pd(fscal,dy30);
 +            tz               = _mm256_mul_pd(fscal,dz30);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 111 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx30             = _mm256_sub_pd(ix3,jx0);
 +            dy30             = _mm256_sub_pd(iy3,jy0);
 +            dz30             = _mm256_sub_pd(iz3,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq30            = gmx_mm256_calc_rsq_pd(dx30,dy30,dz30);
 +
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv30           = gmx_mm256_invsqrt_pd(rsq30);
 +
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq30         = _mm256_mul_pd(rinv30,rinv30);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq10,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_add_pd(rinv10,_mm256_mul_pd(krf,rsq10)),crf));
 +            felec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_mul_pd(rinv10,rinvsq10),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq20,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_add_pd(rinv20,_mm256_mul_pd(krf,rsq20)),crf));
 +            felec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_mul_pd(rinv20,rinvsq20),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq30,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq30             = _mm256_mul_pd(iq3,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq30,_mm256_sub_pd(_mm256_add_pd(rinv30,_mm256_mul_pd(krf,rsq30)),crf));
 +            felec            = _mm256_mul_pd(qq30,_mm256_sub_pd(_mm256_mul_pd(rinv30,rinvsq30),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq30,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx30);
 +            ty               = _mm256_mul_pd(fscal,dy30);
 +            tz               = _mm256_mul_pd(fscal,dz30);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 111 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
 +                                                 f+i_coord_offset+DIM,fshift+i_shift_offset);
 +
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 19 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W4_VF,outeriter*19 + inneriter*111);
 +}
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecRFCut_VdwNone_GeomW4P1_F_avx_256_double
 + * Electrostatics interaction: ReactionField
 + * VdW interaction:            None
 + * Geometry:                   Water4-Particle
 + * Calculate force/pot:        Force
 + */
 +void
 +nb_kernel_ElecRFCut_VdwNone_GeomW4P1_F_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    real *           vdwioffsetptr3;
 +    __m256d          ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          dx30,dy30,dz30,rsq30,rinv30,rinvsq30,r30,qq30,c6_30,c12_30;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    krf              = _mm256_set1_pd(fr->ic->k_rf);
 +    krf2             = _mm256_set1_pd(fr->ic->k_rf*2.0);
 +    crf              = _mm256_set1_pd(fr->ic->c_rf);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    iq3              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+3]));
 +
 +    /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
 +    rcutoff_scalar   = fr->rcoulomb;
 +    rcutoff          = _mm256_set1_pd(rcutoff_scalar);
 +    rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset+DIM,
 +                                                    &ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
 +
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +        fix3             = _mm256_setzero_pd();
 +        fiy3             = _mm256_setzero_pd();
 +        fiz3             = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx30             = _mm256_sub_pd(ix3,jx0);
 +            dy30             = _mm256_sub_pd(iy3,jy0);
 +            dz30             = _mm256_sub_pd(iz3,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq30            = gmx_mm256_calc_rsq_pd(dx30,dy30,dz30);
 +
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv30           = gmx_mm256_invsqrt_pd(rsq30);
 +
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq30         = _mm256_mul_pd(rinv30,rinv30);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq10,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_mul_pd(rinv10,rinvsq10),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq20,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_mul_pd(rinv20,rinvsq20),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq30,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq30             = _mm256_mul_pd(iq3,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq30,_mm256_sub_pd(_mm256_mul_pd(rinv30,rinvsq30),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq30,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx30);
 +            ty               = _mm256_mul_pd(fscal,dy30);
 +            tz               = _mm256_mul_pd(fscal,dz30);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 93 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx30             = _mm256_sub_pd(ix3,jx0);
 +            dy30             = _mm256_sub_pd(iy3,jy0);
 +            dz30             = _mm256_sub_pd(iz3,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq30            = gmx_mm256_calc_rsq_pd(dx30,dy30,dz30);
 +
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv30           = gmx_mm256_invsqrt_pd(rsq30);
 +
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq30         = _mm256_mul_pd(rinv30,rinv30);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq10,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_mul_pd(rinv10,rinvsq10),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq20,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_mul_pd(rinv20,rinvsq20),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq30,rcutoff2))
 +            {
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq30             = _mm256_mul_pd(iq3,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq30,_mm256_sub_pd(_mm256_mul_pd(rinv30,rinvsq30),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq30,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx30);
 +            ty               = _mm256_mul_pd(fscal,dy30);
 +            tz               = _mm256_mul_pd(fscal,dz30);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            }
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 93 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
 +                                                 f+i_coord_offset+DIM,fshift+i_shift_offset);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 18 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W4_F,outeriter*18 + inneriter*93);
 +}
index a05b13c7d5b52cfe9931c86a56db229b525bc011,0000000000000000000000000000000000000000..dcd4ff00fa7a7f5cdee552f24890fa333fb6f353
mode 100644,000000..100644
--- /dev/null
@@@ -1,2083 -1,0 +1,2083 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/*
 + * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2001-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the Free
 + * Software Foundation; either version 2 of the License, or (at your option) any
 + * later version.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website.
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecRFCut_VdwNone_GeomW4W4_VF_avx_256_double
 + * Electrostatics interaction: ReactionField
 + * VdW interaction:            None
 + * Geometry:                   Water4-Water4
 + * Calculate force/pot:        PotentialAndForce
 + */
 +void
 +nb_kernel_ElecRFCut_VdwNone_GeomW4W4_VF_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    real *           vdwioffsetptr3;
 +    __m256d          ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
 +    int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
 +    __m256d          jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
 +    int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
 +    __m256d          jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
 +    int              vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D;
 +    __m256d          jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
 +    __m256d          dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
 +    __m256d          dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
 +    __m256d          dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
 +    __m256d          dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
 +    __m256d          dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
 +    __m256d          dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
 +    __m256d          dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
 +    __m256d          dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
 +    __m256d          dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    krf              = _mm256_set1_pd(fr->ic->k_rf);
 +    krf2             = _mm256_set1_pd(fr->ic->k_rf*2.0);
 +    crf              = _mm256_set1_pd(fr->ic->c_rf);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    iq3              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+3]));
 +
 +    jq1              = _mm256_set1_pd(charge[inr+1]);
 +    jq2              = _mm256_set1_pd(charge[inr+2]);
 +    jq3              = _mm256_set1_pd(charge[inr+3]);
 +    qq11             = _mm256_mul_pd(iq1,jq1);
 +    qq12             = _mm256_mul_pd(iq1,jq2);
 +    qq13             = _mm256_mul_pd(iq1,jq3);
 +    qq21             = _mm256_mul_pd(iq2,jq1);
 +    qq22             = _mm256_mul_pd(iq2,jq2);
 +    qq23             = _mm256_mul_pd(iq2,jq3);
 +    qq31             = _mm256_mul_pd(iq3,jq1);
 +    qq32             = _mm256_mul_pd(iq3,jq2);
 +    qq33             = _mm256_mul_pd(iq3,jq3);
 +
 +    /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
 +    rcutoff_scalar   = fr->rcoulomb;
 +    rcutoff          = _mm256_set1_pd(rcutoff_scalar);
 +    rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset+DIM,
 +                                                    &ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
 +
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +        fix3             = _mm256_setzero_pd();
 +        fiy3             = _mm256_setzero_pd();
 +        fiz3             = _mm256_setzero_pd();
 +
 +        /* Reset potential sums */
 +        velecsum         = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA+DIM,x+j_coord_offsetB+DIM,
 +                                                 x+j_coord_offsetC+DIM,x+j_coord_offsetD+DIM,
 +                                                 &jx1,&jy1,&jz1,&jx2,&jy2,&jz2,&jx3,&jy3,&jz3);
 +
 +            /* Calculate displacement vector */
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx13             = _mm256_sub_pd(ix1,jx3);
 +            dy13             = _mm256_sub_pd(iy1,jy3);
 +            dz13             = _mm256_sub_pd(iz1,jz3);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +            dx23             = _mm256_sub_pd(ix2,jx3);
 +            dy23             = _mm256_sub_pd(iy2,jy3);
 +            dz23             = _mm256_sub_pd(iz2,jz3);
 +            dx31             = _mm256_sub_pd(ix3,jx1);
 +            dy31             = _mm256_sub_pd(iy3,jy1);
 +            dz31             = _mm256_sub_pd(iz3,jz1);
 +            dx32             = _mm256_sub_pd(ix3,jx2);
 +            dy32             = _mm256_sub_pd(iy3,jy2);
 +            dz32             = _mm256_sub_pd(iz3,jz2);
 +            dx33             = _mm256_sub_pd(ix3,jx3);
 +            dy33             = _mm256_sub_pd(iy3,jy3);
 +            dz33             = _mm256_sub_pd(iz3,jz3);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq13            = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +            rsq23            = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
 +            rsq31            = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
 +            rsq32            = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
 +            rsq33            = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);
 +
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv13           = gmx_mm256_invsqrt_pd(rsq13);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +            rinv23           = gmx_mm256_invsqrt_pd(rsq23);
 +            rinv31           = gmx_mm256_invsqrt_pd(rsq31);
 +            rinv32           = gmx_mm256_invsqrt_pd(rsq32);
 +            rinv33           = gmx_mm256_invsqrt_pd(rsq33);
 +
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq13         = _mm256_mul_pd(rinv13,rinv13);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +            rinvsq23         = _mm256_mul_pd(rinv23,rinv23);
 +            rinvsq31         = _mm256_mul_pd(rinv31,rinv31);
 +            rinvsq32         = _mm256_mul_pd(rinv32,rinv32);
 +            rinvsq33         = _mm256_mul_pd(rinv33,rinv33);
 +
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +            fjx3             = _mm256_setzero_pd();
 +            fjy3             = _mm256_setzero_pd();
 +            fjz3             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq11,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_add_pd(rinv11,_mm256_mul_pd(krf,rsq11)),crf));
 +            felec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_mul_pd(rinv11,rinvsq11),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq12,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_add_pd(rinv12,_mm256_mul_pd(krf,rsq12)),crf));
 +            felec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_mul_pd(rinv12,rinvsq12),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq13,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq13,_mm256_sub_pd(_mm256_add_pd(rinv13,_mm256_mul_pd(krf,rsq13)),crf));
 +            felec            = _mm256_mul_pd(qq13,_mm256_sub_pd(_mm256_mul_pd(rinv13,rinvsq13),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq13,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx13);
 +            ty               = _mm256_mul_pd(fscal,dy13);
 +            tz               = _mm256_mul_pd(fscal,dz13);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq21,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_add_pd(rinv21,_mm256_mul_pd(krf,rsq21)),crf));
 +            felec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_mul_pd(rinv21,rinvsq21),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq22,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_add_pd(rinv22,_mm256_mul_pd(krf,rsq22)),crf));
 +            felec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_mul_pd(rinv22,rinvsq22),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq23,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq23,_mm256_sub_pd(_mm256_add_pd(rinv23,_mm256_mul_pd(krf,rsq23)),crf));
 +            felec            = _mm256_mul_pd(qq23,_mm256_sub_pd(_mm256_mul_pd(rinv23,rinvsq23),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq23,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx23);
 +            ty               = _mm256_mul_pd(fscal,dy23);
 +            tz               = _mm256_mul_pd(fscal,dz23);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq31,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq31,_mm256_sub_pd(_mm256_add_pd(rinv31,_mm256_mul_pd(krf,rsq31)),crf));
 +            felec            = _mm256_mul_pd(qq31,_mm256_sub_pd(_mm256_mul_pd(rinv31,rinvsq31),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq31,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx31);
 +            ty               = _mm256_mul_pd(fscal,dy31);
 +            tz               = _mm256_mul_pd(fscal,dz31);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq32,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq32,_mm256_sub_pd(_mm256_add_pd(rinv32,_mm256_mul_pd(krf,rsq32)),crf));
 +            felec            = _mm256_mul_pd(qq32,_mm256_sub_pd(_mm256_mul_pd(rinv32,rinvsq32),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq32,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx32);
 +            ty               = _mm256_mul_pd(fscal,dy32);
 +            tz               = _mm256_mul_pd(fscal,dz32);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq33,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq33,_mm256_sub_pd(_mm256_add_pd(rinv33,_mm256_mul_pd(krf,rsq33)),crf));
 +            felec            = _mm256_mul_pd(qq33,_mm256_sub_pd(_mm256_mul_pd(rinv33,rinvsq33),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq33,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx33);
 +            ty               = _mm256_mul_pd(fscal,dy33);
 +            tz               = _mm256_mul_pd(fscal,dz33);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            }
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA+DIM,fjptrB+DIM,fjptrC+DIM,fjptrD+DIM,
 +                                                      fjx1,fjy1,fjz1,fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
 +
 +            /* Inner loop uses 324 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA+DIM,x+j_coord_offsetB+DIM,
 +                                                 x+j_coord_offsetC+DIM,x+j_coord_offsetD+DIM,
 +                                                 &jx1,&jy1,&jz1,&jx2,&jy2,&jz2,&jx3,&jy3,&jz3);
 +
 +            /* Calculate displacement vector */
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx13             = _mm256_sub_pd(ix1,jx3);
 +            dy13             = _mm256_sub_pd(iy1,jy3);
 +            dz13             = _mm256_sub_pd(iz1,jz3);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +            dx23             = _mm256_sub_pd(ix2,jx3);
 +            dy23             = _mm256_sub_pd(iy2,jy3);
 +            dz23             = _mm256_sub_pd(iz2,jz3);
 +            dx31             = _mm256_sub_pd(ix3,jx1);
 +            dy31             = _mm256_sub_pd(iy3,jy1);
 +            dz31             = _mm256_sub_pd(iz3,jz1);
 +            dx32             = _mm256_sub_pd(ix3,jx2);
 +            dy32             = _mm256_sub_pd(iy3,jy2);
 +            dz32             = _mm256_sub_pd(iz3,jz2);
 +            dx33             = _mm256_sub_pd(ix3,jx3);
 +            dy33             = _mm256_sub_pd(iy3,jy3);
 +            dz33             = _mm256_sub_pd(iz3,jz3);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq13            = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +            rsq23            = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
 +            rsq31            = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
 +            rsq32            = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
 +            rsq33            = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);
 +
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv13           = gmx_mm256_invsqrt_pd(rsq13);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +            rinv23           = gmx_mm256_invsqrt_pd(rsq23);
 +            rinv31           = gmx_mm256_invsqrt_pd(rsq31);
 +            rinv32           = gmx_mm256_invsqrt_pd(rsq32);
 +            rinv33           = gmx_mm256_invsqrt_pd(rsq33);
 +
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq13         = _mm256_mul_pd(rinv13,rinv13);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +            rinvsq23         = _mm256_mul_pd(rinv23,rinv23);
 +            rinvsq31         = _mm256_mul_pd(rinv31,rinv31);
 +            rinvsq32         = _mm256_mul_pd(rinv32,rinv32);
 +            rinvsq33         = _mm256_mul_pd(rinv33,rinv33);
 +
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +            fjx3             = _mm256_setzero_pd();
 +            fjy3             = _mm256_setzero_pd();
 +            fjz3             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq11,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_add_pd(rinv11,_mm256_mul_pd(krf,rsq11)),crf));
 +            felec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_mul_pd(rinv11,rinvsq11),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq12,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_add_pd(rinv12,_mm256_mul_pd(krf,rsq12)),crf));
 +            felec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_mul_pd(rinv12,rinvsq12),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq13,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq13,_mm256_sub_pd(_mm256_add_pd(rinv13,_mm256_mul_pd(krf,rsq13)),crf));
 +            felec            = _mm256_mul_pd(qq13,_mm256_sub_pd(_mm256_mul_pd(rinv13,rinvsq13),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq13,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx13);
 +            ty               = _mm256_mul_pd(fscal,dy13);
 +            tz               = _mm256_mul_pd(fscal,dz13);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq21,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_add_pd(rinv21,_mm256_mul_pd(krf,rsq21)),crf));
 +            felec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_mul_pd(rinv21,rinvsq21),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq22,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_add_pd(rinv22,_mm256_mul_pd(krf,rsq22)),crf));
 +            felec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_mul_pd(rinv22,rinvsq22),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq23,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq23,_mm256_sub_pd(_mm256_add_pd(rinv23,_mm256_mul_pd(krf,rsq23)),crf));
 +            felec            = _mm256_mul_pd(qq23,_mm256_sub_pd(_mm256_mul_pd(rinv23,rinvsq23),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq23,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx23);
 +            ty               = _mm256_mul_pd(fscal,dy23);
 +            tz               = _mm256_mul_pd(fscal,dz23);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq31,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq31,_mm256_sub_pd(_mm256_add_pd(rinv31,_mm256_mul_pd(krf,rsq31)),crf));
 +            felec            = _mm256_mul_pd(qq31,_mm256_sub_pd(_mm256_mul_pd(rinv31,rinvsq31),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq31,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx31);
 +            ty               = _mm256_mul_pd(fscal,dy31);
 +            tz               = _mm256_mul_pd(fscal,dz31);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq32,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq32,_mm256_sub_pd(_mm256_add_pd(rinv32,_mm256_mul_pd(krf,rsq32)),crf));
 +            felec            = _mm256_mul_pd(qq32,_mm256_sub_pd(_mm256_mul_pd(rinv32,rinvsq32),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq32,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx32);
 +            ty               = _mm256_mul_pd(fscal,dy32);
 +            tz               = _mm256_mul_pd(fscal,dz32);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq33,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq33,_mm256_sub_pd(_mm256_add_pd(rinv33,_mm256_mul_pd(krf,rsq33)),crf));
 +            felec            = _mm256_mul_pd(qq33,_mm256_sub_pd(_mm256_mul_pd(rinv33,rinvsq33),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq33,rcutoff2,_CMP_LT_OQ);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx33);
 +            ty               = _mm256_mul_pd(fscal,dy33);
 +            tz               = _mm256_mul_pd(fscal,dz33);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            }
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA+DIM,fjptrB+DIM,fjptrC+DIM,fjptrD+DIM,
 +                                                      fjx1,fjy1,fjz1,fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
 +
 +            /* Inner loop uses 324 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
 +                                                 f+i_coord_offset+DIM,fshift+i_shift_offset);
 +
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 19 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W4W4_VF,outeriter*19 + inneriter*324);
 +}
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecRFCut_VdwNone_GeomW4W4_F_avx_256_double
 + * Electrostatics interaction: ReactionField
 + * VdW interaction:            None
 + * Geometry:                   Water4-Water4
 + * Calculate force/pot:        Force
 + */
 +void
 +nb_kernel_ElecRFCut_VdwNone_GeomW4W4_F_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    real *           vdwioffsetptr3;
 +    __m256d          ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
 +    int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
 +    __m256d          jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
 +    int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
 +    __m256d          jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
 +    int              vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D;
 +    __m256d          jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
 +    __m256d          dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
 +    __m256d          dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
 +    __m256d          dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
 +    __m256d          dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
 +    __m256d          dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
 +    __m256d          dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
 +    __m256d          dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
 +    __m256d          dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
 +    __m256d          dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    krf              = _mm256_set1_pd(fr->ic->k_rf);
 +    krf2             = _mm256_set1_pd(fr->ic->k_rf*2.0);
 +    crf              = _mm256_set1_pd(fr->ic->c_rf);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    iq3              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+3]));
 +
 +    jq1              = _mm256_set1_pd(charge[inr+1]);
 +    jq2              = _mm256_set1_pd(charge[inr+2]);
 +    jq3              = _mm256_set1_pd(charge[inr+3]);
 +    qq11             = _mm256_mul_pd(iq1,jq1);
 +    qq12             = _mm256_mul_pd(iq1,jq2);
 +    qq13             = _mm256_mul_pd(iq1,jq3);
 +    qq21             = _mm256_mul_pd(iq2,jq1);
 +    qq22             = _mm256_mul_pd(iq2,jq2);
 +    qq23             = _mm256_mul_pd(iq2,jq3);
 +    qq31             = _mm256_mul_pd(iq3,jq1);
 +    qq32             = _mm256_mul_pd(iq3,jq2);
 +    qq33             = _mm256_mul_pd(iq3,jq3);
 +
 +    /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
 +    rcutoff_scalar   = fr->rcoulomb;
 +    rcutoff          = _mm256_set1_pd(rcutoff_scalar);
 +    rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset+DIM,
 +                                                    &ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
 +
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +        fix3             = _mm256_setzero_pd();
 +        fiy3             = _mm256_setzero_pd();
 +        fiz3             = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA+DIM,x+j_coord_offsetB+DIM,
 +                                                 x+j_coord_offsetC+DIM,x+j_coord_offsetD+DIM,
 +                                                 &jx1,&jy1,&jz1,&jx2,&jy2,&jz2,&jx3,&jy3,&jz3);
 +
 +            /* Calculate displacement vector */
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx13             = _mm256_sub_pd(ix1,jx3);
 +            dy13             = _mm256_sub_pd(iy1,jy3);
 +            dz13             = _mm256_sub_pd(iz1,jz3);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +            dx23             = _mm256_sub_pd(ix2,jx3);
 +            dy23             = _mm256_sub_pd(iy2,jy3);
 +            dz23             = _mm256_sub_pd(iz2,jz3);
 +            dx31             = _mm256_sub_pd(ix3,jx1);
 +            dy31             = _mm256_sub_pd(iy3,jy1);
 +            dz31             = _mm256_sub_pd(iz3,jz1);
 +            dx32             = _mm256_sub_pd(ix3,jx2);
 +            dy32             = _mm256_sub_pd(iy3,jy2);
 +            dz32             = _mm256_sub_pd(iz3,jz2);
 +            dx33             = _mm256_sub_pd(ix3,jx3);
 +            dy33             = _mm256_sub_pd(iy3,jy3);
 +            dz33             = _mm256_sub_pd(iz3,jz3);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq13            = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +            rsq23            = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
 +            rsq31            = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
 +            rsq32            = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
 +            rsq33            = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);
 +
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv13           = gmx_mm256_invsqrt_pd(rsq13);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +            rinv23           = gmx_mm256_invsqrt_pd(rsq23);
 +            rinv31           = gmx_mm256_invsqrt_pd(rsq31);
 +            rinv32           = gmx_mm256_invsqrt_pd(rsq32);
 +            rinv33           = gmx_mm256_invsqrt_pd(rsq33);
 +
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq13         = _mm256_mul_pd(rinv13,rinv13);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +            rinvsq23         = _mm256_mul_pd(rinv23,rinv23);
 +            rinvsq31         = _mm256_mul_pd(rinv31,rinv31);
 +            rinvsq32         = _mm256_mul_pd(rinv32,rinv32);
 +            rinvsq33         = _mm256_mul_pd(rinv33,rinv33);
 +
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +            fjx3             = _mm256_setzero_pd();
 +            fjy3             = _mm256_setzero_pd();
 +            fjz3             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq11,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_mul_pd(rinv11,rinvsq11),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq12,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_mul_pd(rinv12,rinvsq12),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq13,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq13,_mm256_sub_pd(_mm256_mul_pd(rinv13,rinvsq13),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq13,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx13);
 +            ty               = _mm256_mul_pd(fscal,dy13);
 +            tz               = _mm256_mul_pd(fscal,dz13);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq21,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_mul_pd(rinv21,rinvsq21),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq22,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_mul_pd(rinv22,rinvsq22),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq23,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq23,_mm256_sub_pd(_mm256_mul_pd(rinv23,rinvsq23),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq23,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx23);
 +            ty               = _mm256_mul_pd(fscal,dy23);
 +            tz               = _mm256_mul_pd(fscal,dz23);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq31,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq31,_mm256_sub_pd(_mm256_mul_pd(rinv31,rinvsq31),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq31,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx31);
 +            ty               = _mm256_mul_pd(fscal,dy31);
 +            tz               = _mm256_mul_pd(fscal,dz31);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq32,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq32,_mm256_sub_pd(_mm256_mul_pd(rinv32,rinvsq32),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq32,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx32);
 +            ty               = _mm256_mul_pd(fscal,dy32);
 +            tz               = _mm256_mul_pd(fscal,dz32);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq33,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq33,_mm256_sub_pd(_mm256_mul_pd(rinv33,rinvsq33),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq33,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx33);
 +            ty               = _mm256_mul_pd(fscal,dy33);
 +            tz               = _mm256_mul_pd(fscal,dz33);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            }
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA+DIM,fjptrB+DIM,fjptrC+DIM,fjptrD+DIM,
 +                                                      fjx1,fjy1,fjz1,fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
 +
 +            /* Inner loop uses 270 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA+DIM,x+j_coord_offsetB+DIM,
 +                                                 x+j_coord_offsetC+DIM,x+j_coord_offsetD+DIM,
 +                                                 &jx1,&jy1,&jz1,&jx2,&jy2,&jz2,&jx3,&jy3,&jz3);
 +
 +            /* Calculate displacement vector */
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx13             = _mm256_sub_pd(ix1,jx3);
 +            dy13             = _mm256_sub_pd(iy1,jy3);
 +            dz13             = _mm256_sub_pd(iz1,jz3);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +            dx23             = _mm256_sub_pd(ix2,jx3);
 +            dy23             = _mm256_sub_pd(iy2,jy3);
 +            dz23             = _mm256_sub_pd(iz2,jz3);
 +            dx31             = _mm256_sub_pd(ix3,jx1);
 +            dy31             = _mm256_sub_pd(iy3,jy1);
 +            dz31             = _mm256_sub_pd(iz3,jz1);
 +            dx32             = _mm256_sub_pd(ix3,jx2);
 +            dy32             = _mm256_sub_pd(iy3,jy2);
 +            dz32             = _mm256_sub_pd(iz3,jz2);
 +            dx33             = _mm256_sub_pd(ix3,jx3);
 +            dy33             = _mm256_sub_pd(iy3,jy3);
 +            dz33             = _mm256_sub_pd(iz3,jz3);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq13            = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +            rsq23            = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
 +            rsq31            = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
 +            rsq32            = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
 +            rsq33            = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);
 +
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv13           = gmx_mm256_invsqrt_pd(rsq13);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +            rinv23           = gmx_mm256_invsqrt_pd(rsq23);
 +            rinv31           = gmx_mm256_invsqrt_pd(rsq31);
 +            rinv32           = gmx_mm256_invsqrt_pd(rsq32);
 +            rinv33           = gmx_mm256_invsqrt_pd(rsq33);
 +
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq13         = _mm256_mul_pd(rinv13,rinv13);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +            rinvsq23         = _mm256_mul_pd(rinv23,rinv23);
 +            rinvsq31         = _mm256_mul_pd(rinv31,rinv31);
 +            rinvsq32         = _mm256_mul_pd(rinv32,rinv32);
 +            rinvsq33         = _mm256_mul_pd(rinv33,rinv33);
 +
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +            fjx3             = _mm256_setzero_pd();
 +            fjy3             = _mm256_setzero_pd();
 +            fjz3             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq11,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_mul_pd(rinv11,rinvsq11),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq12,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_mul_pd(rinv12,rinvsq12),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq13,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq13,_mm256_sub_pd(_mm256_mul_pd(rinv13,rinvsq13),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq13,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx13);
 +            ty               = _mm256_mul_pd(fscal,dy13);
 +            tz               = _mm256_mul_pd(fscal,dz13);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq21,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_mul_pd(rinv21,rinvsq21),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq22,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_mul_pd(rinv22,rinvsq22),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq23,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq23,_mm256_sub_pd(_mm256_mul_pd(rinv23,rinvsq23),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq23,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx23);
 +            ty               = _mm256_mul_pd(fscal,dy23);
 +            tz               = _mm256_mul_pd(fscal,dz23);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq31,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq31,_mm256_sub_pd(_mm256_mul_pd(rinv31,rinvsq31),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq31,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx31);
 +            ty               = _mm256_mul_pd(fscal,dy31);
 +            tz               = _mm256_mul_pd(fscal,dz31);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq32,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq32,_mm256_sub_pd(_mm256_mul_pd(rinv32,rinvsq32),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq32,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx32);
 +            ty               = _mm256_mul_pd(fscal,dy32);
 +            tz               = _mm256_mul_pd(fscal,dz32);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            }
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            if (gmx_mm256_any_lt(rsq33,rcutoff2))
 +            {
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq33,_mm256_sub_pd(_mm256_mul_pd(rinv33,rinvsq33),krf2));
 +
 +            cutoff_mask      = _mm256_cmp_pd(rsq33,rcutoff2,_CMP_LT_OQ);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx33);
 +            ty               = _mm256_mul_pd(fscal,dy33);
 +            tz               = _mm256_mul_pd(fscal,dz33);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            }
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA+DIM,fjptrB+DIM,fjptrC+DIM,fjptrD+DIM,
 +                                                      fjx1,fjy1,fjz1,fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
 +
 +            /* Inner loop uses 270 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
 +                                                 f+i_coord_offset+DIM,fshift+i_shift_offset);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 18 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W4W4_F,outeriter*18 + inneriter*270);
 +}
index f69ca59c46607ca930c87d40c4de6c00f5f571a1,0000000000000000000000000000000000000000..eb111057e45049a4f10ec0527766866ea4b3a688
mode 100644,000000..100644
--- /dev/null
@@@ -1,800 -1,0 +1,800 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/*
 + * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2001-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the Free
 + * Software Foundation; either version 2 of the License, or (at your option) any
 + * later version.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website.
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecRF_VdwCSTab_GeomP1P1_VF_avx_256_double
 + * Electrostatics interaction: ReactionField
 + * VdW interaction:            CubicSplineTable
 + * Geometry:                   Particle-Particle
 + * Calculate force/pot:        PotentialAndForce
 + */
 +/* NOTE(review): this file is auto-generated (see file header); change the kernel
 + * generator rather than editing this function by hand.
 + */
 +void
 +nb_kernel_ElecRF_VdwCSTab_GeomP1P1_VF_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m128i          vfitab;
 +    __m128i          ifour       = _mm_set1_epi32(4);
 +    __m256d          rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
 +    real             *vftab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    krf              = _mm256_set1_pd(fr->ic->k_rf);
 +    krf2             = _mm256_set1_pd(fr->ic->k_rf*2.0);
 +    crf              = _mm256_set1_pd(fr->ic->c_rf);
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    vftab            = kernel_data->table_vdw->data;
 +    vftabscale       = _mm256_set1_pd(kernel_data->table_vdw->scale);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_1rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +
 +        /* Load parameters for i particles */
 +        iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +        vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +        /* Reset potential sums */
 +        velecsum         = _mm256_setzero_pd();
 +        vvdwsum          = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            /* <<3: each table point stores 8 reals - Y,F,G,H for dispersion then Y,F,G,H
 +             * for repulsion (the repulsion quartet is reached below by adding ifour).
 +             */
 +            vfitab           = _mm_slli_epi32(vfitab,3);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_add_pd(rinv00,_mm256_mul_pd(krf,rsq00)),crf));
 +            felec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_mul_pd(rinv00,rinvsq00),krf2));
 +
 +            /* CUBIC SPLINE TABLE DISPERSION */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw6            = _mm256_mul_pd(c6_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw6            = _mm256_mul_pd(c6_00,FF);
 +
 +            /* CUBIC SPLINE TABLE REPULSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw12           = _mm256_mul_pd(c12_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw12           = _mm256_mul_pd(c12_00,FF);
 +            vvdw             = _mm256_add_pd(vvdw12,vvdw6);
 +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
 +
 +            /* Inner loop uses 67 flops */
 +        }
 +
 +        /* Epilogue: handle the final, possibly partial, quartet of j atoms with masking */
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +            /* Merge fix: the cast must be to single precision (ps) - _mm_cmplt_epi32
 +             * returns __m128i and tmpmask0 is declared __m128, consumed by
 +             * _mm_permute_ps below; a pd cast would yield the wrong type.
 +             */
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            /* Zero r for dummy lanes so the table index stays in range */
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,3);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_add_pd(rinv00,_mm256_mul_pd(krf,rsq00)),crf));
 +            felec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_mul_pd(rinv00,rinvsq00),krf2));
 +
 +            /* CUBIC SPLINE TABLE DISPERSION */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw6            = _mm256_mul_pd(c6_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw6            = _mm256_mul_pd(c6_00,FF);
 +
 +            /* CUBIC SPLINE TABLE REPULSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw12           = _mm256_mul_pd(c12_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw12           = _mm256_mul_pd(c12_00,FF);
 +            vvdw             = _mm256_add_pd(vvdw12,vvdw6);
 +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +            vvdw             = _mm256_andnot_pd(dummy_mask,vvdw);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            /* Dummy lanes write into scratch so real force memory is untouched */
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
 +
 +            /* Inner loop uses 68 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_1atom_swizzle_pd(fix0,fiy0,fiz0,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
 +        gmx_mm256_update_1pot_pd(vvdwsum,kernel_data->energygrp_vdw+ggid);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 9 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_VF,outeriter*9 + inneriter*68);
 +}
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecRF_VdwCSTab_GeomP1P1_F_avx_256_double
 + * Electrostatics interaction: ReactionField
 + * VdW interaction:            CubicSplineTable
 + * Geometry:                   Particle-Particle
 + * Calculate force/pot:        Force
 + */
 +void
 +nb_kernel_ElecRF_VdwCSTab_GeomP1P1_F_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m128i          vfitab;
 +    __m128i          ifour       = _mm_set1_epi32(4);
 +    __m256d          rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
 +    real             *vftab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    krf              = _mm256_set1_pd(fr->ic->k_rf);
 +    krf2             = _mm256_set1_pd(fr->ic->k_rf*2.0);
 +    crf              = _mm256_set1_pd(fr->ic->c_rf);
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    vftab            = kernel_data->table_vdw->data;
 +    vftabscale       = _mm256_set1_pd(kernel_data->table_vdw->scale);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_1rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +
 +        /* Load parameters for i particles */
 +        iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +        vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,3);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_mul_pd(rinv00,rinvsq00),krf2));
 +
 +            /* CUBIC SPLINE TABLE DISPERSION */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw6            = _mm256_mul_pd(c6_00,FF);
 +
 +            /* CUBIC SPLINE TABLE REPULSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw12           = _mm256_mul_pd(c12_00,FF);
 +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
 +
 +            /* Inner loop uses 54 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,3);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_mul_pd(rinv00,rinvsq00),krf2));
 +
 +            /* CUBIC SPLINE TABLE DISPERSION */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw6            = _mm256_mul_pd(c6_00,FF);
 +
 +            /* CUBIC SPLINE TABLE REPULSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw12           = _mm256_mul_pd(c12_00,FF);
 +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
 +
 +            /* Inner loop uses 55 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_1atom_swizzle_pd(fix0,fiy0,fiz0,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 7 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_F,outeriter*7 + inneriter*55);
 +}
index 29ea62eb789419436cf6db4973faa1c78f0086c3,0000000000000000000000000000000000000000..c7a2e252c63ffe1ea317818ae315818e6aa911c0
mode 100644,000000..100644
--- /dev/null
@@@ -1,1150 -1,0 +1,1150 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/*
 + * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2001-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the Free
 + * Software Foundation; either version 2 of the License, or (at your option) any
 + * later version.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website.
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecRF_VdwCSTab_GeomW3P1_VF_avx_256_double
 + * Electrostatics interaction: ReactionField
 + * VdW interaction:            CubicSplineTable
 + * Geometry:                   Water3-Particle
 + * Calculate force/pot:        PotentialAndForce
 + */
 +void
 +nb_kernel_ElecRF_VdwCSTab_GeomW3P1_VF_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m128i          vfitab;
 +    __m128i          ifour       = _mm_set1_epi32(4);
 +    __m256d          rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
 +    real             *vftab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    krf              = _mm256_set1_pd(fr->ic->k_rf);
 +    krf2             = _mm256_set1_pd(fr->ic->k_rf*2.0);
 +    crf              = _mm256_set1_pd(fr->ic->c_rf);
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    vftab            = kernel_data->table_vdw->data;
 +    vftabscale       = _mm256_set1_pd(kernel_data->table_vdw->scale);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +
 +        /* Reset potential sums */
 +        velecsum         = _mm256_setzero_pd();
 +        vvdwsum          = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,3);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_add_pd(rinv00,_mm256_mul_pd(krf,rsq00)),crf));
 +            felec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_mul_pd(rinv00,rinvsq00),krf2));
 +
 +            /* CUBIC SPLINE TABLE DISPERSION */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw6            = _mm256_mul_pd(c6_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw6            = _mm256_mul_pd(c6_00,FF);
 +
 +            /* CUBIC SPLINE TABLE REPULSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw12           = _mm256_mul_pd(c12_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw12           = _mm256_mul_pd(c12_00,FF);
 +            vvdw             = _mm256_add_pd(vvdw12,vvdw6);
 +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_add_pd(rinv10,_mm256_mul_pd(krf,rsq10)),crf));
 +            felec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_mul_pd(rinv10,rinvsq10),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_add_pd(rinv20,_mm256_mul_pd(krf,rsq20)),crf));
 +            felec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_mul_pd(rinv20,rinvsq20),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 134 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,3);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_add_pd(rinv00,_mm256_mul_pd(krf,rsq00)),crf));
 +            felec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_mul_pd(rinv00,rinvsq00),krf2));
 +
 +            /* CUBIC SPLINE TABLE DISPERSION */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw6            = _mm256_mul_pd(c6_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw6            = _mm256_mul_pd(c6_00,FF);
 +
 +            /* CUBIC SPLINE TABLE REPULSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw12           = _mm256_mul_pd(c12_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw12           = _mm256_mul_pd(c12_00,FF);
 +            vvdw             = _mm256_add_pd(vvdw12,vvdw6);
 +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +            vvdw             = _mm256_andnot_pd(dummy_mask,vvdw);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_add_pd(rinv10,_mm256_mul_pd(krf,rsq10)),crf));
 +            felec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_mul_pd(rinv10,rinvsq10),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_add_pd(rinv20,_mm256_mul_pd(krf,rsq20)),crf));
 +            felec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_mul_pd(rinv20,rinvsq20),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 135 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
 +        gmx_mm256_update_1pot_pd(vvdwsum,kernel_data->energygrp_vdw+ggid);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 20 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3_VF,outeriter*20 + inneriter*135);
 +}
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecRF_VdwCSTab_GeomW3P1_F_avx_256_double
 + * Electrostatics interaction: ReactionField
 + * VdW interaction:            CubicSplineTable
 + * Geometry:                   Water3-Particle
 + * Calculate force/pot:        Force
 + */
 +void
 +nb_kernel_ElecRF_VdwCSTab_GeomW3P1_F_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m128i          vfitab;
 +    __m128i          ifour       = _mm_set1_epi32(4);
 +    __m256d          rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
 +    real             *vftab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    krf              = _mm256_set1_pd(fr->ic->k_rf);
 +    krf2             = _mm256_set1_pd(fr->ic->k_rf*2.0);
 +    crf              = _mm256_set1_pd(fr->ic->c_rf);
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    vftab            = kernel_data->table_vdw->data;
 +    vftabscale       = _mm256_set1_pd(kernel_data->table_vdw->scale);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,3);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_mul_pd(rinv00,rinvsq00),krf2));
 +
 +            /* CUBIC SPLINE TABLE DISPERSION */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw6            = _mm256_mul_pd(c6_00,FF);
 +
 +            /* CUBIC SPLINE TABLE REPULSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw12           = _mm256_mul_pd(c12_00,FF);
 +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_mul_pd(rinv10,rinvsq10),krf2));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_mul_pd(rinv20,rinvsq20),krf2));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 111 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,3);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_mul_pd(rinv00,rinvsq00),krf2));
 +
 +            /* CUBIC SPLINE TABLE DISPERSION */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw6            = _mm256_mul_pd(c6_00,FF);
 +
 +            /* CUBIC SPLINE TABLE REPULSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw12           = _mm256_mul_pd(c12_00,FF);
 +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_mul_pd(rinv10,rinvsq10),krf2));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_mul_pd(rinv20,rinvsq20),krf2));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 112 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 18 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3_F,outeriter*18 + inneriter*112);
 +}
index 568d423a9d8e8f15e58b1f2bea356733f126cc76,0000000000000000000000000000000000000000..d953fb1db4c54b24fd59eccc45826d92e1246471
mode 100644,000000..100644
--- /dev/null
@@@ -1,1916 -1,0 +1,1916 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/*
 + * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2001-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the Free
 + * Software Foundation; either version 2 of the License, or (at your option) any
 + * later version.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website.
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecRF_VdwCSTab_GeomW3W3_VF_avx_256_double
 + * Electrostatics interaction: ReactionField
 + * VdW interaction:            CubicSplineTable
 + * Geometry:                   Water3-Water3
 + * Calculate force/pot:        PotentialAndForce
 + */
 +void
 +nb_kernel_ElecRF_VdwCSTab_GeomW3W3_VF_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
 +    __m256d          jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
 +    int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
 +    __m256d          jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
 +    __m256d          dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
 +    __m256d          dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
 +    __m256d          dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m128i          vfitab;
 +    __m128i          ifour       = _mm_set1_epi32(4);
 +    __m256d          rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
 +    real             *vftab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    krf              = _mm256_set1_pd(fr->ic->k_rf);
 +    krf2             = _mm256_set1_pd(fr->ic->k_rf*2.0);
 +    crf              = _mm256_set1_pd(fr->ic->c_rf);
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    vftab            = kernel_data->table_vdw->data;
 +    vftabscale       = _mm256_set1_pd(kernel_data->table_vdw->scale);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +    jq0              = _mm256_set1_pd(charge[inr+0]);
 +    jq1              = _mm256_set1_pd(charge[inr+1]);
 +    jq2              = _mm256_set1_pd(charge[inr+2]);
 +    vdwjidx0A        = 2*vdwtype[inr+0];
 +    qq00             = _mm256_mul_pd(iq0,jq0);
 +    c6_00            = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A]);
 +    c12_00           = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A+1]);
 +    qq01             = _mm256_mul_pd(iq0,jq1);
 +    qq02             = _mm256_mul_pd(iq0,jq2);
 +    qq10             = _mm256_mul_pd(iq1,jq0);
 +    qq11             = _mm256_mul_pd(iq1,jq1);
 +    qq12             = _mm256_mul_pd(iq1,jq2);
 +    qq20             = _mm256_mul_pd(iq2,jq0);
 +    qq21             = _mm256_mul_pd(iq2,jq1);
 +    qq22             = _mm256_mul_pd(iq2,jq2);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +
 +        /* Reset potential sums */
 +        velecsum         = _mm256_setzero_pd();
 +        vvdwsum          = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                              &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx01             = _mm256_sub_pd(ix0,jx1);
 +            dy01             = _mm256_sub_pd(iy0,jy1);
 +            dz01             = _mm256_sub_pd(iz0,jz1);
 +            dx02             = _mm256_sub_pd(ix0,jx2);
 +            dy02             = _mm256_sub_pd(iy0,jy2);
 +            dz02             = _mm256_sub_pd(iz0,jz2);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
 +            rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv01           = gmx_mm256_invsqrt_pd(rsq01);
 +            rinv02           = gmx_mm256_invsqrt_pd(rsq02);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq01         = _mm256_mul_pd(rinv01,rinv01);
 +            rinvsq02         = _mm256_mul_pd(rinv02,rinv02);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,3);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_add_pd(rinv00,_mm256_mul_pd(krf,rsq00)),crf));
 +            felec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_mul_pd(rinv00,rinvsq00),krf2));
 +
 +            /* CUBIC SPLINE TABLE DISPERSION */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw6            = _mm256_mul_pd(c6_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw6            = _mm256_mul_pd(c6_00,FF);
 +
 +            /* CUBIC SPLINE TABLE REPULSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw12           = _mm256_mul_pd(c12_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw12           = _mm256_mul_pd(c12_00,FF);
 +            vvdw             = _mm256_add_pd(vvdw12,vvdw6);
 +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq01,_mm256_sub_pd(_mm256_add_pd(rinv01,_mm256_mul_pd(krf,rsq01)),crf));
 +            felec            = _mm256_mul_pd(qq01,_mm256_sub_pd(_mm256_mul_pd(rinv01,rinvsq01),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx01);
 +            ty               = _mm256_mul_pd(fscal,dy01);
 +            tz               = _mm256_mul_pd(fscal,dz01);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq02,_mm256_sub_pd(_mm256_add_pd(rinv02,_mm256_mul_pd(krf,rsq02)),crf));
 +            felec            = _mm256_mul_pd(qq02,_mm256_sub_pd(_mm256_mul_pd(rinv02,rinvsq02),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx02);
 +            ty               = _mm256_mul_pd(fscal,dy02);
 +            tz               = _mm256_mul_pd(fscal,dz02);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_add_pd(rinv10,_mm256_mul_pd(krf,rsq10)),crf));
 +            felec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_mul_pd(rinv10,rinvsq10),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_add_pd(rinv11,_mm256_mul_pd(krf,rsq11)),crf));
 +            felec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_mul_pd(rinv11,rinvsq11),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_add_pd(rinv12,_mm256_mul_pd(krf,rsq12)),crf));
 +            felec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_mul_pd(rinv12,rinvsq12),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_add_pd(rinv20,_mm256_mul_pd(krf,rsq20)),crf));
 +            felec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_mul_pd(rinv20,rinvsq20),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_add_pd(rinv21,_mm256_mul_pd(krf,rsq21)),crf));
 +            felec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_mul_pd(rinv21,rinvsq21),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_add_pd(rinv22,_mm256_mul_pd(krf,rsq22)),crf));
 +            felec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_mul_pd(rinv22,rinvsq22),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
 +
 +            /* Inner loop uses 323 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                              &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx01             = _mm256_sub_pd(ix0,jx1);
 +            dy01             = _mm256_sub_pd(iy0,jy1);
 +            dz01             = _mm256_sub_pd(iz0,jz1);
 +            dx02             = _mm256_sub_pd(ix0,jx2);
 +            dy02             = _mm256_sub_pd(iy0,jy2);
 +            dz02             = _mm256_sub_pd(iz0,jz2);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
 +            rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv01           = gmx_mm256_invsqrt_pd(rsq01);
 +            rinv02           = gmx_mm256_invsqrt_pd(rsq02);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq01         = _mm256_mul_pd(rinv01,rinv01);
 +            rinvsq02         = _mm256_mul_pd(rinv02,rinv02);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,3);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_add_pd(rinv00,_mm256_mul_pd(krf,rsq00)),crf));
 +            felec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_mul_pd(rinv00,rinvsq00),krf2));
 +
 +            /* CUBIC SPLINE TABLE DISPERSION */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw6            = _mm256_mul_pd(c6_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw6            = _mm256_mul_pd(c6_00,FF);
 +
 +            /* CUBIC SPLINE TABLE REPULSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw12           = _mm256_mul_pd(c12_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw12           = _mm256_mul_pd(c12_00,FF);
 +            vvdw             = _mm256_add_pd(vvdw12,vvdw6);
 +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +            vvdw             = _mm256_andnot_pd(dummy_mask,vvdw);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq01,_mm256_sub_pd(_mm256_add_pd(rinv01,_mm256_mul_pd(krf,rsq01)),crf));
 +            felec            = _mm256_mul_pd(qq01,_mm256_sub_pd(_mm256_mul_pd(rinv01,rinvsq01),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx01);
 +            ty               = _mm256_mul_pd(fscal,dy01);
 +            tz               = _mm256_mul_pd(fscal,dz01);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq02,_mm256_sub_pd(_mm256_add_pd(rinv02,_mm256_mul_pd(krf,rsq02)),crf));
 +            felec            = _mm256_mul_pd(qq02,_mm256_sub_pd(_mm256_mul_pd(rinv02,rinvsq02),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx02);
 +            ty               = _mm256_mul_pd(fscal,dy02);
 +            tz               = _mm256_mul_pd(fscal,dz02);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_add_pd(rinv10,_mm256_mul_pd(krf,rsq10)),crf));
 +            felec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_mul_pd(rinv10,rinvsq10),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_add_pd(rinv11,_mm256_mul_pd(krf,rsq11)),crf));
 +            felec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_mul_pd(rinv11,rinvsq11),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_add_pd(rinv12,_mm256_mul_pd(krf,rsq12)),crf));
 +            felec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_mul_pd(rinv12,rinvsq12),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_add_pd(rinv20,_mm256_mul_pd(krf,rsq20)),crf));
 +            felec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_mul_pd(rinv20,rinvsq20),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_add_pd(rinv21,_mm256_mul_pd(krf,rsq21)),crf));
 +            felec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_mul_pd(rinv21,rinvsq21),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_add_pd(rinv22,_mm256_mul_pd(krf,rsq22)),crf));
 +            felec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_mul_pd(rinv22,rinvsq22),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
 +
 +            /* Inner loop uses 324 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
 +        gmx_mm256_update_1pot_pd(vvdwsum,kernel_data->energygrp_vdw+ggid);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 20 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3W3_VF,outeriter*20 + inneriter*324);
 +}
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecRF_VdwCSTab_GeomW3W3_F_avx_256_double
 + * Electrostatics interaction: ReactionField
 + * VdW interaction:            CubicSplineTable
 + * Geometry:                   Water3-Water3
 + * Calculate force/pot:        Force
 + */
 +void
 +nb_kernel_ElecRF_VdwCSTab_GeomW3W3_F_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
 +    __m256d          jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
 +    int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
 +    __m256d          jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
 +    __m256d          dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
 +    __m256d          dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
 +    __m256d          dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m128i          vfitab;
 +    __m128i          ifour       = _mm_set1_epi32(4);
 +    __m256d          rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
 +    real             *vftab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    krf              = _mm256_set1_pd(fr->ic->k_rf);
 +    krf2             = _mm256_set1_pd(fr->ic->k_rf*2.0);
 +    crf              = _mm256_set1_pd(fr->ic->c_rf);
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    vftab            = kernel_data->table_vdw->data;
 +    vftabscale       = _mm256_set1_pd(kernel_data->table_vdw->scale);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +    jq0              = _mm256_set1_pd(charge[inr+0]);
 +    jq1              = _mm256_set1_pd(charge[inr+1]);
 +    jq2              = _mm256_set1_pd(charge[inr+2]);
 +    vdwjidx0A        = 2*vdwtype[inr+0];
 +    qq00             = _mm256_mul_pd(iq0,jq0);
 +    c6_00            = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A]);
 +    c12_00           = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A+1]);
 +    qq01             = _mm256_mul_pd(iq0,jq1);
 +    qq02             = _mm256_mul_pd(iq0,jq2);
 +    qq10             = _mm256_mul_pd(iq1,jq0);
 +    qq11             = _mm256_mul_pd(iq1,jq1);
 +    qq12             = _mm256_mul_pd(iq1,jq2);
 +    qq20             = _mm256_mul_pd(iq2,jq0);
 +    qq21             = _mm256_mul_pd(iq2,jq1);
 +    qq22             = _mm256_mul_pd(iq2,jq2);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                              &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx01             = _mm256_sub_pd(ix0,jx1);
 +            dy01             = _mm256_sub_pd(iy0,jy1);
 +            dz01             = _mm256_sub_pd(iz0,jz1);
 +            dx02             = _mm256_sub_pd(ix0,jx2);
 +            dy02             = _mm256_sub_pd(iy0,jy2);
 +            dz02             = _mm256_sub_pd(iz0,jz2);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
 +            rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv01           = gmx_mm256_invsqrt_pd(rsq01);
 +            rinv02           = gmx_mm256_invsqrt_pd(rsq02);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq01         = _mm256_mul_pd(rinv01,rinv01);
 +            rinvsq02         = _mm256_mul_pd(rinv02,rinv02);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,3);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_mul_pd(rinv00,rinvsq00),krf2));
 +
 +            /* CUBIC SPLINE TABLE DISPERSION */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw6            = _mm256_mul_pd(c6_00,FF);
 +
 +            /* CUBIC SPLINE TABLE REPULSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw12           = _mm256_mul_pd(c12_00,FF);
 +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq01,_mm256_sub_pd(_mm256_mul_pd(rinv01,rinvsq01),krf2));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx01);
 +            ty               = _mm256_mul_pd(fscal,dy01);
 +            tz               = _mm256_mul_pd(fscal,dz01);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq02,_mm256_sub_pd(_mm256_mul_pd(rinv02,rinvsq02),krf2));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx02);
 +            ty               = _mm256_mul_pd(fscal,dy02);
 +            tz               = _mm256_mul_pd(fscal,dz02);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_mul_pd(rinv10,rinvsq10),krf2));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_mul_pd(rinv11,rinvsq11),krf2));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_mul_pd(rinv12,rinvsq12),krf2));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_mul_pd(rinv20,rinvsq20),krf2));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_mul_pd(rinv21,rinvsq21),krf2));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_mul_pd(rinv22,rinvsq22),krf2));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
 +
 +            /* Inner loop uses 270 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                              &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx01             = _mm256_sub_pd(ix0,jx1);
 +            dy01             = _mm256_sub_pd(iy0,jy1);
 +            dz01             = _mm256_sub_pd(iz0,jz1);
 +            dx02             = _mm256_sub_pd(ix0,jx2);
 +            dy02             = _mm256_sub_pd(iy0,jy2);
 +            dz02             = _mm256_sub_pd(iz0,jz2);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
 +            rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv01           = gmx_mm256_invsqrt_pd(rsq01);
 +            rinv02           = gmx_mm256_invsqrt_pd(rsq02);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq01         = _mm256_mul_pd(rinv01,rinv01);
 +            rinvsq02         = _mm256_mul_pd(rinv02,rinv02);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,3);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_mul_pd(rinv00,rinvsq00),krf2));
 +
 +            /* CUBIC SPLINE TABLE DISPERSION */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw6            = _mm256_mul_pd(c6_00,FF);
 +
 +            /* CUBIC SPLINE TABLE REPULSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw12           = _mm256_mul_pd(c12_00,FF);
 +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq01,_mm256_sub_pd(_mm256_mul_pd(rinv01,rinvsq01),krf2));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx01);
 +            ty               = _mm256_mul_pd(fscal,dy01);
 +            tz               = _mm256_mul_pd(fscal,dz01);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq02,_mm256_sub_pd(_mm256_mul_pd(rinv02,rinvsq02),krf2));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx02);
 +            ty               = _mm256_mul_pd(fscal,dy02);
 +            tz               = _mm256_mul_pd(fscal,dz02);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_mul_pd(rinv10,rinvsq10),krf2));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_mul_pd(rinv11,rinvsq11),krf2));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_mul_pd(rinv12,rinvsq12),krf2));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_mul_pd(rinv20,rinvsq20),krf2));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_mul_pd(rinv21,rinvsq21),krf2));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_mul_pd(rinv22,rinvsq22),krf2));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
 +
 +            /* Inner loop uses 271 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 18 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3W3_F,outeriter*18 + inneriter*271);
 +}
index 1aa1faf160fe4b86be94dff8c70123ae4a14f933,0000000000000000000000000000000000000000..63c306cf8f962038a018acdafc9767eeec058377
mode 100644,000000..100644
--- /dev/null
@@@ -1,1278 -1,0 +1,1278 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/*
 + * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2001-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the Free
 + * Software Foundation; either version 2 of the License, or (at your option) any
 + * later version.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website.
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecRF_VdwCSTab_GeomW4P1_VF_avx_256_double
 + * Electrostatics interaction: ReactionField
 + * VdW interaction:            CubicSplineTable
 + * Geometry:                   Water4-Particle
 + * Calculate force/pot:        PotentialAndForce
 + */
 +void
 +nb_kernel_ElecRF_VdwCSTab_GeomW4P1_VF_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    real *           vdwioffsetptr3;
 +    __m256d          ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          dx30,dy30,dz30,rsq30,rinv30,rinvsq30,r30,qq30,c6_30,c12_30;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m128i          vfitab;
 +    __m128i          ifour       = _mm_set1_epi32(4);
 +    __m256d          rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
 +    real             *vftab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    krf              = _mm256_set1_pd(fr->ic->k_rf);
 +    krf2             = _mm256_set1_pd(fr->ic->k_rf*2.0);
 +    crf              = _mm256_set1_pd(fr->ic->c_rf);
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    vftab            = kernel_data->table_vdw->data;
 +    vftabscale       = _mm256_set1_pd(kernel_data->table_vdw->scale);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    iq3              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+3]));
 +    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_4rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +        fix3             = _mm256_setzero_pd();
 +        fiy3             = _mm256_setzero_pd();
 +        fiz3             = _mm256_setzero_pd();
 +
 +        /* Reset potential sums */
 +        velecsum         = _mm256_setzero_pd();
 +        vvdwsum          = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx30             = _mm256_sub_pd(ix3,jx0);
 +            dy30             = _mm256_sub_pd(iy3,jy0);
 +            dz30             = _mm256_sub_pd(iz3,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq30            = gmx_mm256_calc_rsq_pd(dx30,dy30,dz30);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv30           = gmx_mm256_invsqrt_pd(rsq30);
 +
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq30         = _mm256_mul_pd(rinv30,rinv30);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,3);
 +
 +            /* CUBIC SPLINE TABLE DISPERSION */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw6            = _mm256_mul_pd(c6_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw6            = _mm256_mul_pd(c6_00,FF);
 +
 +            /* CUBIC SPLINE TABLE REPULSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw12           = _mm256_mul_pd(c12_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw12           = _mm256_mul_pd(c12_00,FF);
 +            vvdw             = _mm256_add_pd(vvdw12,vvdw6);
 +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = fvdw;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_add_pd(rinv10,_mm256_mul_pd(krf,rsq10)),crf));
 +            felec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_mul_pd(rinv10,rinvsq10),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_add_pd(rinv20,_mm256_mul_pd(krf,rsq20)),crf));
 +            felec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_mul_pd(rinv20,rinvsq20),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq30             = _mm256_mul_pd(iq3,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq30,_mm256_sub_pd(_mm256_add_pd(rinv30,_mm256_mul_pd(krf,rsq30)),crf));
 +            felec            = _mm256_mul_pd(qq30,_mm256_sub_pd(_mm256_mul_pd(rinv30,rinvsq30),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx30);
 +            ty               = _mm256_mul_pd(fscal,dy30);
 +            tz               = _mm256_mul_pd(fscal,dz30);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 155 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx30             = _mm256_sub_pd(ix3,jx0);
 +            dy30             = _mm256_sub_pd(iy3,jy0);
 +            dz30             = _mm256_sub_pd(iz3,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq30            = gmx_mm256_calc_rsq_pd(dx30,dy30,dz30);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv30           = gmx_mm256_invsqrt_pd(rsq30);
 +
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq30         = _mm256_mul_pd(rinv30,rinv30);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,3);
 +
 +            /* CUBIC SPLINE TABLE DISPERSION */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw6            = _mm256_mul_pd(c6_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw6            = _mm256_mul_pd(c6_00,FF);
 +
 +            /* CUBIC SPLINE TABLE REPULSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw12           = _mm256_mul_pd(c12_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw12           = _mm256_mul_pd(c12_00,FF);
 +            vvdw             = _mm256_add_pd(vvdw12,vvdw6);
 +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            vvdw             = _mm256_andnot_pd(dummy_mask,vvdw);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = fvdw;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_add_pd(rinv10,_mm256_mul_pd(krf,rsq10)),crf));
 +            felec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_mul_pd(rinv10,rinvsq10),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_add_pd(rinv20,_mm256_mul_pd(krf,rsq20)),crf));
 +            felec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_mul_pd(rinv20,rinvsq20),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq30             = _mm256_mul_pd(iq3,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq30,_mm256_sub_pd(_mm256_add_pd(rinv30,_mm256_mul_pd(krf,rsq30)),crf));
 +            felec            = _mm256_mul_pd(qq30,_mm256_sub_pd(_mm256_mul_pd(rinv30,rinvsq30),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx30);
 +            ty               = _mm256_mul_pd(fscal,dy30);
 +            tz               = _mm256_mul_pd(fscal,dz30);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 156 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_4atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
 +        gmx_mm256_update_1pot_pd(vvdwsum,kernel_data->energygrp_vdw+ggid);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 26 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4_VF,outeriter*26 + inneriter*156);
 +}
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecRF_VdwCSTab_GeomW4P1_F_avx_256_double
 + * Electrostatics interaction: ReactionField
 + * VdW interaction:            CubicSplineTable
 + * Geometry:                   Water4-Particle
 + * Calculate force/pot:        Force
 + */
 +void
 +nb_kernel_ElecRF_VdwCSTab_GeomW4P1_F_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    real *           vdwioffsetptr3;
 +    __m256d          ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          dx30,dy30,dz30,rsq30,rinv30,rinvsq30,r30,qq30,c6_30,c12_30;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m128i          vfitab;
 +    __m128i          ifour       = _mm_set1_epi32(4);
 +    __m256d          rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
 +    real             *vftab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    krf              = _mm256_set1_pd(fr->ic->k_rf);
 +    krf2             = _mm256_set1_pd(fr->ic->k_rf*2.0);
 +    crf              = _mm256_set1_pd(fr->ic->c_rf);
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    vftab            = kernel_data->table_vdw->data;
 +    vftabscale       = _mm256_set1_pd(kernel_data->table_vdw->scale);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    iq3              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+3]));
 +    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_4rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +        fix3             = _mm256_setzero_pd();
 +        fiy3             = _mm256_setzero_pd();
 +        fiz3             = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx30             = _mm256_sub_pd(ix3,jx0);
 +            dy30             = _mm256_sub_pd(iy3,jy0);
 +            dz30             = _mm256_sub_pd(iz3,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq30            = gmx_mm256_calc_rsq_pd(dx30,dy30,dz30);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv30           = gmx_mm256_invsqrt_pd(rsq30);
 +
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq30         = _mm256_mul_pd(rinv30,rinv30);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,3);
 +
 +            /* CUBIC SPLINE TABLE DISPERSION */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw6            = _mm256_mul_pd(c6_00,FF);
 +
 +            /* CUBIC SPLINE TABLE REPULSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw12           = _mm256_mul_pd(c12_00,FF);
 +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            fscal            = fvdw;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_mul_pd(rinv10,rinvsq10),krf2));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_mul_pd(rinv20,rinvsq20),krf2));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq30             = _mm256_mul_pd(iq3,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq30,_mm256_sub_pd(_mm256_mul_pd(rinv30,rinvsq30),krf2));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx30);
 +            ty               = _mm256_mul_pd(fscal,dy30);
 +            tz               = _mm256_mul_pd(fscal,dz30);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 132 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx30             = _mm256_sub_pd(ix3,jx0);
 +            dy30             = _mm256_sub_pd(iy3,jy0);
 +            dz30             = _mm256_sub_pd(iz3,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq30            = gmx_mm256_calc_rsq_pd(dx30,dy30,dz30);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv30           = gmx_mm256_invsqrt_pd(rsq30);
 +
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq30         = _mm256_mul_pd(rinv30,rinv30);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,3);
 +
 +            /* CUBIC SPLINE TABLE DISPERSION */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw6            = _mm256_mul_pd(c6_00,FF);
 +
 +            /* CUBIC SPLINE TABLE REPULSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw12           = _mm256_mul_pd(c12_00,FF);
 +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            fscal            = fvdw;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_mul_pd(rinv10,rinvsq10),krf2));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_mul_pd(rinv20,rinvsq20),krf2));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq30             = _mm256_mul_pd(iq3,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq30,_mm256_sub_pd(_mm256_mul_pd(rinv30,rinvsq30),krf2));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx30);
 +            ty               = _mm256_mul_pd(fscal,dy30);
 +            tz               = _mm256_mul_pd(fscal,dz30);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 133 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_4atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 24 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4_F,outeriter*24 + inneriter*133);
 +}
index d12e99437ab2edccb653d56cb1ccc4843f4a3fe6,0000000000000000000000000000000000000000..48766184092cfbaae15eade499d73e79717e40c5
mode 100644,000000..100644
--- /dev/null
@@@ -1,2060 -1,0 +1,2060 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/*
 + * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2001-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the Free
 + * Software Foundation; either version 2 of the License, or (at your option) any
 + * later version.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website.
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecRF_VdwCSTab_GeomW4W4_VF_avx_256_double
 + * Electrostatics interaction: ReactionField
 + * VdW interaction:            CubicSplineTable
 + * Geometry:                   Water4-Water4
 + * Calculate force/pot:        PotentialAndForce
 + */
 +void
 +nb_kernel_ElecRF_VdwCSTab_GeomW4W4_VF_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    real *           vdwioffsetptr3;
 +    __m256d          ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
 +    __m256d          jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
 +    int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
 +    __m256d          jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
 +    int              vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D;
 +    __m256d          jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
 +    __m256d          dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
 +    __m256d          dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
 +    __m256d          dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
 +    __m256d          dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
 +    __m256d          dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
 +    __m256d          dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
 +    __m256d          dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
 +    __m256d          dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m128i          vfitab;
 +    __m128i          ifour       = _mm_set1_epi32(4);
 +    __m256d          rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
 +    real             *vftab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    krf              = _mm256_set1_pd(fr->ic->k_rf);
 +    krf2             = _mm256_set1_pd(fr->ic->k_rf*2.0);
 +    crf              = _mm256_set1_pd(fr->ic->c_rf);
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    vftab            = kernel_data->table_vdw->data;
 +    vftabscale       = _mm256_set1_pd(kernel_data->table_vdw->scale);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    iq3              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+3]));
 +    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +    jq1              = _mm256_set1_pd(charge[inr+1]);
 +    jq2              = _mm256_set1_pd(charge[inr+2]);
 +    jq3              = _mm256_set1_pd(charge[inr+3]);
 +    vdwjidx0A        = 2*vdwtype[inr+0];
 +    c6_00            = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A]);
 +    c12_00           = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A+1]);
 +    qq11             = _mm256_mul_pd(iq1,jq1);
 +    qq12             = _mm256_mul_pd(iq1,jq2);
 +    qq13             = _mm256_mul_pd(iq1,jq3);
 +    qq21             = _mm256_mul_pd(iq2,jq1);
 +    qq22             = _mm256_mul_pd(iq2,jq2);
 +    qq23             = _mm256_mul_pd(iq2,jq3);
 +    qq31             = _mm256_mul_pd(iq3,jq1);
 +    qq32             = _mm256_mul_pd(iq3,jq2);
 +    qq33             = _mm256_mul_pd(iq3,jq3);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_4rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +        fix3             = _mm256_setzero_pd();
 +        fiy3             = _mm256_setzero_pd();
 +        fiz3             = _mm256_setzero_pd();
 +
 +        /* Reset potential sums */
 +        velecsum         = _mm256_setzero_pd();
 +        vvdwsum          = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_4rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
 +                                                 &jy2,&jz2,&jx3,&jy3,&jz3);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx13             = _mm256_sub_pd(ix1,jx3);
 +            dy13             = _mm256_sub_pd(iy1,jy3);
 +            dz13             = _mm256_sub_pd(iz1,jz3);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +            dx23             = _mm256_sub_pd(ix2,jx3);
 +            dy23             = _mm256_sub_pd(iy2,jy3);
 +            dz23             = _mm256_sub_pd(iz2,jz3);
 +            dx31             = _mm256_sub_pd(ix3,jx1);
 +            dy31             = _mm256_sub_pd(iy3,jy1);
 +            dz31             = _mm256_sub_pd(iz3,jz1);
 +            dx32             = _mm256_sub_pd(ix3,jx2);
 +            dy32             = _mm256_sub_pd(iy3,jy2);
 +            dz32             = _mm256_sub_pd(iz3,jz2);
 +            dx33             = _mm256_sub_pd(ix3,jx3);
 +            dy33             = _mm256_sub_pd(iy3,jy3);
 +            dz33             = _mm256_sub_pd(iz3,jz3);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq13            = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +            rsq23            = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
 +            rsq31            = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
 +            rsq32            = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
 +            rsq33            = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv13           = gmx_mm256_invsqrt_pd(rsq13);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +            rinv23           = gmx_mm256_invsqrt_pd(rsq23);
 +            rinv31           = gmx_mm256_invsqrt_pd(rsq31);
 +            rinv32           = gmx_mm256_invsqrt_pd(rsq32);
 +            rinv33           = gmx_mm256_invsqrt_pd(rsq33);
 +
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq13         = _mm256_mul_pd(rinv13,rinv13);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +            rinvsq23         = _mm256_mul_pd(rinv23,rinv23);
 +            rinvsq31         = _mm256_mul_pd(rinv31,rinv31);
 +            rinvsq32         = _mm256_mul_pd(rinv32,rinv32);
 +            rinvsq33         = _mm256_mul_pd(rinv33,rinv33);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +            fjx3             = _mm256_setzero_pd();
 +            fjy3             = _mm256_setzero_pd();
 +            fjz3             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,3);
 +
 +            /* CUBIC SPLINE TABLE DISPERSION */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw6            = _mm256_mul_pd(c6_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw6            = _mm256_mul_pd(c6_00,FF);
 +
 +            /* CUBIC SPLINE TABLE REPULSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw12           = _mm256_mul_pd(c12_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw12           = _mm256_mul_pd(c12_00,FF);
 +            vvdw             = _mm256_add_pd(vvdw12,vvdw6);
 +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = fvdw;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_add_pd(rinv11,_mm256_mul_pd(krf,rsq11)),crf));
 +            felec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_mul_pd(rinv11,rinvsq11),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_add_pd(rinv12,_mm256_mul_pd(krf,rsq12)),crf));
 +            felec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_mul_pd(rinv12,rinvsq12),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq13,_mm256_sub_pd(_mm256_add_pd(rinv13,_mm256_mul_pd(krf,rsq13)),crf));
 +            felec            = _mm256_mul_pd(qq13,_mm256_sub_pd(_mm256_mul_pd(rinv13,rinvsq13),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx13);
 +            ty               = _mm256_mul_pd(fscal,dy13);
 +            tz               = _mm256_mul_pd(fscal,dz13);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_add_pd(rinv21,_mm256_mul_pd(krf,rsq21)),crf));
 +            felec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_mul_pd(rinv21,rinvsq21),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_add_pd(rinv22,_mm256_mul_pd(krf,rsq22)),crf));
 +            felec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_mul_pd(rinv22,rinvsq22),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq23,_mm256_sub_pd(_mm256_add_pd(rinv23,_mm256_mul_pd(krf,rsq23)),crf));
 +            felec            = _mm256_mul_pd(qq23,_mm256_sub_pd(_mm256_mul_pd(rinv23,rinvsq23),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx23);
 +            ty               = _mm256_mul_pd(fscal,dy23);
 +            tz               = _mm256_mul_pd(fscal,dz23);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq31,_mm256_sub_pd(_mm256_add_pd(rinv31,_mm256_mul_pd(krf,rsq31)),crf));
 +            felec            = _mm256_mul_pd(qq31,_mm256_sub_pd(_mm256_mul_pd(rinv31,rinvsq31),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx31);
 +            ty               = _mm256_mul_pd(fscal,dy31);
 +            tz               = _mm256_mul_pd(fscal,dz31);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq32,_mm256_sub_pd(_mm256_add_pd(rinv32,_mm256_mul_pd(krf,rsq32)),crf));
 +            felec            = _mm256_mul_pd(qq32,_mm256_sub_pd(_mm256_mul_pd(rinv32,rinvsq32),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx32);
 +            ty               = _mm256_mul_pd(fscal,dy32);
 +            tz               = _mm256_mul_pd(fscal,dz32);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq33,_mm256_sub_pd(_mm256_add_pd(rinv33,_mm256_mul_pd(krf,rsq33)),crf));
 +            felec            = _mm256_mul_pd(qq33,_mm256_sub_pd(_mm256_mul_pd(rinv33,rinvsq33),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx33);
 +            ty               = _mm256_mul_pd(fscal,dy33);
 +            tz               = _mm256_mul_pd(fscal,dz33);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_4rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
 +                                                      fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
 +
 +            /* Inner loop uses 347 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_4rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
 +                                                 &jy2,&jz2,&jx3,&jy3,&jz3);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx13             = _mm256_sub_pd(ix1,jx3);
 +            dy13             = _mm256_sub_pd(iy1,jy3);
 +            dz13             = _mm256_sub_pd(iz1,jz3);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +            dx23             = _mm256_sub_pd(ix2,jx3);
 +            dy23             = _mm256_sub_pd(iy2,jy3);
 +            dz23             = _mm256_sub_pd(iz2,jz3);
 +            dx31             = _mm256_sub_pd(ix3,jx1);
 +            dy31             = _mm256_sub_pd(iy3,jy1);
 +            dz31             = _mm256_sub_pd(iz3,jz1);
 +            dx32             = _mm256_sub_pd(ix3,jx2);
 +            dy32             = _mm256_sub_pd(iy3,jy2);
 +            dz32             = _mm256_sub_pd(iz3,jz2);
 +            dx33             = _mm256_sub_pd(ix3,jx3);
 +            dy33             = _mm256_sub_pd(iy3,jy3);
 +            dz33             = _mm256_sub_pd(iz3,jz3);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq13            = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +            rsq23            = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
 +            rsq31            = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
 +            rsq32            = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
 +            rsq33            = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv13           = gmx_mm256_invsqrt_pd(rsq13);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +            rinv23           = gmx_mm256_invsqrt_pd(rsq23);
 +            rinv31           = gmx_mm256_invsqrt_pd(rsq31);
 +            rinv32           = gmx_mm256_invsqrt_pd(rsq32);
 +            rinv33           = gmx_mm256_invsqrt_pd(rsq33);
 +
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq13         = _mm256_mul_pd(rinv13,rinv13);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +            rinvsq23         = _mm256_mul_pd(rinv23,rinv23);
 +            rinvsq31         = _mm256_mul_pd(rinv31,rinv31);
 +            rinvsq32         = _mm256_mul_pd(rinv32,rinv32);
 +            rinvsq33         = _mm256_mul_pd(rinv33,rinv33);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +            fjx3             = _mm256_setzero_pd();
 +            fjy3             = _mm256_setzero_pd();
 +            fjz3             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,3);
 +
 +            /* CUBIC SPLINE TABLE DISPERSION */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw6            = _mm256_mul_pd(c6_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw6            = _mm256_mul_pd(c6_00,FF);
 +
 +            /* CUBIC SPLINE TABLE REPULSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw12           = _mm256_mul_pd(c12_00,VV);
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw12           = _mm256_mul_pd(c12_00,FF);
 +            vvdw             = _mm256_add_pd(vvdw12,vvdw6);
 +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            vvdw             = _mm256_andnot_pd(dummy_mask,vvdw);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = fvdw;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_add_pd(rinv11,_mm256_mul_pd(krf,rsq11)),crf));
 +            felec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_mul_pd(rinv11,rinvsq11),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_add_pd(rinv12,_mm256_mul_pd(krf,rsq12)),crf));
 +            felec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_mul_pd(rinv12,rinvsq12),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq13,_mm256_sub_pd(_mm256_add_pd(rinv13,_mm256_mul_pd(krf,rsq13)),crf));
 +            felec            = _mm256_mul_pd(qq13,_mm256_sub_pd(_mm256_mul_pd(rinv13,rinvsq13),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx13);
 +            ty               = _mm256_mul_pd(fscal,dy13);
 +            tz               = _mm256_mul_pd(fscal,dz13);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_add_pd(rinv21,_mm256_mul_pd(krf,rsq21)),crf));
 +            felec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_mul_pd(rinv21,rinvsq21),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_add_pd(rinv22,_mm256_mul_pd(krf,rsq22)),crf));
 +            felec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_mul_pd(rinv22,rinvsq22),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq23,_mm256_sub_pd(_mm256_add_pd(rinv23,_mm256_mul_pd(krf,rsq23)),crf));
 +            felec            = _mm256_mul_pd(qq23,_mm256_sub_pd(_mm256_mul_pd(rinv23,rinvsq23),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx23);
 +            ty               = _mm256_mul_pd(fscal,dy23);
 +            tz               = _mm256_mul_pd(fscal,dz23);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq31,_mm256_sub_pd(_mm256_add_pd(rinv31,_mm256_mul_pd(krf,rsq31)),crf));
 +            felec            = _mm256_mul_pd(qq31,_mm256_sub_pd(_mm256_mul_pd(rinv31,rinvsq31),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx31);
 +            ty               = _mm256_mul_pd(fscal,dy31);
 +            tz               = _mm256_mul_pd(fscal,dz31);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq32,_mm256_sub_pd(_mm256_add_pd(rinv32,_mm256_mul_pd(krf,rsq32)),crf));
 +            felec            = _mm256_mul_pd(qq32,_mm256_sub_pd(_mm256_mul_pd(rinv32,rinvsq32),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx32);
 +            ty               = _mm256_mul_pd(fscal,dy32);
 +            tz               = _mm256_mul_pd(fscal,dz32);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq33,_mm256_sub_pd(_mm256_add_pd(rinv33,_mm256_mul_pd(krf,rsq33)),crf));
 +            felec            = _mm256_mul_pd(qq33,_mm256_sub_pd(_mm256_mul_pd(rinv33,rinvsq33),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx33);
 +            ty               = _mm256_mul_pd(fscal,dy33);
 +            tz               = _mm256_mul_pd(fscal,dz33);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_4rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
 +                                                      fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
 +
 +            /* Inner loop uses 348 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_4atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
 +        gmx_mm256_update_1pot_pd(vvdwsum,kernel_data->energygrp_vdw+ggid);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 26 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4W4_VF,outeriter*26 + inneriter*348);
 +}
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecRF_VdwCSTab_GeomW4W4_F_avx_256_double
 + * Electrostatics interaction: ReactionField
 + * VdW interaction:            CubicSplineTable
 + * Geometry:                   Water4-Water4
 + * Calculate force/pot:        Force
 + */
 +void
 +nb_kernel_ElecRF_VdwCSTab_GeomW4W4_F_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    real *           vdwioffsetptr3;
 +    __m256d          ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
 +    __m256d          jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
 +    int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
 +    __m256d          jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
 +    int              vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D;
 +    __m256d          jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
 +    __m256d          dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
 +    __m256d          dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
 +    __m256d          dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
 +    __m256d          dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
 +    __m256d          dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
 +    __m256d          dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
 +    __m256d          dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
 +    __m256d          dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m128i          vfitab;
 +    __m128i          ifour       = _mm_set1_epi32(4);
 +    __m256d          rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
 +    real             *vftab;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    krf              = _mm256_set1_pd(fr->ic->k_rf);
 +    krf2             = _mm256_set1_pd(fr->ic->k_rf*2.0);
 +    crf              = _mm256_set1_pd(fr->ic->c_rf);
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    vftab            = kernel_data->table_vdw->data;
 +    vftabscale       = _mm256_set1_pd(kernel_data->table_vdw->scale);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    iq3              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+3]));
 +    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +    jq1              = _mm256_set1_pd(charge[inr+1]);
 +    jq2              = _mm256_set1_pd(charge[inr+2]);
 +    jq3              = _mm256_set1_pd(charge[inr+3]);
 +    vdwjidx0A        = 2*vdwtype[inr+0];
 +    c6_00            = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A]);
 +    c12_00           = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A+1]);
 +    qq11             = _mm256_mul_pd(iq1,jq1);
 +    qq12             = _mm256_mul_pd(iq1,jq2);
 +    qq13             = _mm256_mul_pd(iq1,jq3);
 +    qq21             = _mm256_mul_pd(iq2,jq1);
 +    qq22             = _mm256_mul_pd(iq2,jq2);
 +    qq23             = _mm256_mul_pd(iq2,jq3);
 +    qq31             = _mm256_mul_pd(iq3,jq1);
 +    qq32             = _mm256_mul_pd(iq3,jq2);
 +    qq33             = _mm256_mul_pd(iq3,jq3);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_4rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +        fix3             = _mm256_setzero_pd();
 +        fiy3             = _mm256_setzero_pd();
 +        fiz3             = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_4rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
 +                                                 &jy2,&jz2,&jx3,&jy3,&jz3);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx13             = _mm256_sub_pd(ix1,jx3);
 +            dy13             = _mm256_sub_pd(iy1,jy3);
 +            dz13             = _mm256_sub_pd(iz1,jz3);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +            dx23             = _mm256_sub_pd(ix2,jx3);
 +            dy23             = _mm256_sub_pd(iy2,jy3);
 +            dz23             = _mm256_sub_pd(iz2,jz3);
 +            dx31             = _mm256_sub_pd(ix3,jx1);
 +            dy31             = _mm256_sub_pd(iy3,jy1);
 +            dz31             = _mm256_sub_pd(iz3,jz1);
 +            dx32             = _mm256_sub_pd(ix3,jx2);
 +            dy32             = _mm256_sub_pd(iy3,jy2);
 +            dz32             = _mm256_sub_pd(iz3,jz2);
 +            dx33             = _mm256_sub_pd(ix3,jx3);
 +            dy33             = _mm256_sub_pd(iy3,jy3);
 +            dz33             = _mm256_sub_pd(iz3,jz3);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq13            = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +            rsq23            = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
 +            rsq31            = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
 +            rsq32            = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
 +            rsq33            = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv13           = gmx_mm256_invsqrt_pd(rsq13);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +            rinv23           = gmx_mm256_invsqrt_pd(rsq23);
 +            rinv31           = gmx_mm256_invsqrt_pd(rsq31);
 +            rinv32           = gmx_mm256_invsqrt_pd(rsq32);
 +            rinv33           = gmx_mm256_invsqrt_pd(rsq33);
 +
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq13         = _mm256_mul_pd(rinv13,rinv13);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +            rinvsq23         = _mm256_mul_pd(rinv23,rinv23);
 +            rinvsq31         = _mm256_mul_pd(rinv31,rinv31);
 +            rinvsq32         = _mm256_mul_pd(rinv32,rinv32);
 +            rinvsq33         = _mm256_mul_pd(rinv33,rinv33);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +            fjx3             = _mm256_setzero_pd();
 +            fjy3             = _mm256_setzero_pd();
 +            fjz3             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,3);
 +
 +            /* CUBIC SPLINE TABLE DISPERSION */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw6            = _mm256_mul_pd(c6_00,FF);
 +
 +            /* CUBIC SPLINE TABLE REPULSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw12           = _mm256_mul_pd(c12_00,FF);
 +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            fscal            = fvdw;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_mul_pd(rinv11,rinvsq11),krf2));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_mul_pd(rinv12,rinvsq12),krf2));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq13,_mm256_sub_pd(_mm256_mul_pd(rinv13,rinvsq13),krf2));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx13);
 +            ty               = _mm256_mul_pd(fscal,dy13);
 +            tz               = _mm256_mul_pd(fscal,dz13);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_mul_pd(rinv21,rinvsq21),krf2));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_mul_pd(rinv22,rinvsq22),krf2));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq23,_mm256_sub_pd(_mm256_mul_pd(rinv23,rinvsq23),krf2));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx23);
 +            ty               = _mm256_mul_pd(fscal,dy23);
 +            tz               = _mm256_mul_pd(fscal,dz23);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq31,_mm256_sub_pd(_mm256_mul_pd(rinv31,rinvsq31),krf2));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx31);
 +            ty               = _mm256_mul_pd(fscal,dy31);
 +            tz               = _mm256_mul_pd(fscal,dz31);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq32,_mm256_sub_pd(_mm256_mul_pd(rinv32,rinvsq32),krf2));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx32);
 +            ty               = _mm256_mul_pd(fscal,dy32);
 +            tz               = _mm256_mul_pd(fscal,dz32);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq33,_mm256_sub_pd(_mm256_mul_pd(rinv33,rinvsq33),krf2));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx33);
 +            ty               = _mm256_mul_pd(fscal,dy33);
 +            tz               = _mm256_mul_pd(fscal,dz33);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_4rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
 +                                                      fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
 +
 +            /* Inner loop uses 294 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_4rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
 +                                                 &jy2,&jz2,&jx3,&jy3,&jz3);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx13             = _mm256_sub_pd(ix1,jx3);
 +            dy13             = _mm256_sub_pd(iy1,jy3);
 +            dz13             = _mm256_sub_pd(iz1,jz3);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +            dx23             = _mm256_sub_pd(ix2,jx3);
 +            dy23             = _mm256_sub_pd(iy2,jy3);
 +            dz23             = _mm256_sub_pd(iz2,jz3);
 +            dx31             = _mm256_sub_pd(ix3,jx1);
 +            dy31             = _mm256_sub_pd(iy3,jy1);
 +            dz31             = _mm256_sub_pd(iz3,jz1);
 +            dx32             = _mm256_sub_pd(ix3,jx2);
 +            dy32             = _mm256_sub_pd(iy3,jy2);
 +            dz32             = _mm256_sub_pd(iz3,jz2);
 +            dx33             = _mm256_sub_pd(ix3,jx3);
 +            dy33             = _mm256_sub_pd(iy3,jy3);
 +            dz33             = _mm256_sub_pd(iz3,jz3);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq13            = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +            rsq23            = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
 +            rsq31            = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
 +            rsq32            = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
 +            rsq33            = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv13           = gmx_mm256_invsqrt_pd(rsq13);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +            rinv23           = gmx_mm256_invsqrt_pd(rsq23);
 +            rinv31           = gmx_mm256_invsqrt_pd(rsq31);
 +            rinv32           = gmx_mm256_invsqrt_pd(rsq32);
 +            rinv33           = gmx_mm256_invsqrt_pd(rsq33);
 +
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq13         = _mm256_mul_pd(rinv13,rinv13);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +            rinvsq23         = _mm256_mul_pd(rinv23,rinv23);
 +            rinvsq31         = _mm256_mul_pd(rinv31,rinv31);
 +            rinvsq32         = _mm256_mul_pd(rinv32,rinv32);
 +            rinvsq33         = _mm256_mul_pd(rinv33,rinv33);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +            fjx3             = _mm256_setzero_pd();
 +            fjy3             = _mm256_setzero_pd();
 +            fjz3             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            r00              = _mm256_mul_pd(rsq00,rinv00);
 +            r00              = _mm256_andnot_pd(dummy_mask,r00);
 +
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r00,vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            vfitab           = _mm_slli_epi32(vfitab,3);
 +
 +            /* CUBIC SPLINE TABLE DISPERSION */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw6            = _mm256_mul_pd(c6_00,FF);
 +
 +            /* CUBIC SPLINE TABLE REPULSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw12           = _mm256_mul_pd(c12_00,FF);
 +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
 +
 +            fscal            = fvdw;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_mul_pd(rinv11,rinvsq11),krf2));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_mul_pd(rinv12,rinvsq12),krf2));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq13,_mm256_sub_pd(_mm256_mul_pd(rinv13,rinvsq13),krf2));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx13);
 +            ty               = _mm256_mul_pd(fscal,dy13);
 +            tz               = _mm256_mul_pd(fscal,dz13);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_mul_pd(rinv21,rinvsq21),krf2));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_mul_pd(rinv22,rinvsq22),krf2));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq23,_mm256_sub_pd(_mm256_mul_pd(rinv23,rinvsq23),krf2));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx23);
 +            ty               = _mm256_mul_pd(fscal,dy23);
 +            tz               = _mm256_mul_pd(fscal,dz23);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq31,_mm256_sub_pd(_mm256_mul_pd(rinv31,rinvsq31),krf2));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx31);
 +            ty               = _mm256_mul_pd(fscal,dy31);
 +            tz               = _mm256_mul_pd(fscal,dz31);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq32,_mm256_sub_pd(_mm256_mul_pd(rinv32,rinvsq32),krf2));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx32);
 +            ty               = _mm256_mul_pd(fscal,dy32);
 +            tz               = _mm256_mul_pd(fscal,dz32);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq33,_mm256_sub_pd(_mm256_mul_pd(rinv33,rinvsq33),krf2));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx33);
 +            ty               = _mm256_mul_pd(fscal,dy33);
 +            tz               = _mm256_mul_pd(fscal,dz33);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_4rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
 +                                                      fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
 +
 +            /* Inner loop uses 295 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_4atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 24 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4W4_F,outeriter*24 + inneriter*295);
 +}
index 865021124078dd94523f8f4593b974ff92dd491a,0000000000000000000000000000000000000000..f5c84df7574cb2ae61af00a1fa42e5ae02844c2f
mode 100644,000000..100644
--- /dev/null
@@@ -1,672 -1,0 +1,672 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/*
 + * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2001-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the Free
 + * Software Foundation; either version 2 of the License, or (at your option) any
 + * later version.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website.
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecRF_VdwLJ_GeomP1P1_VF_avx_256_double
 + * Electrostatics interaction: ReactionField
 + * VdW interaction:            LennardJones
 + * Geometry:                   Particle-Particle
 + * Calculate force/pot:        PotentialAndForce
 + */
 +void
 +nb_kernel_ElecRF_VdwLJ_GeomP1P1_VF_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    krf              = _mm256_set1_pd(fr->ic->k_rf);
 +    krf2             = _mm256_set1_pd(fr->ic->k_rf*2.0);
 +    crf              = _mm256_set1_pd(fr->ic->c_rf);
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_1rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +
 +        /* Load parameters for i particles */
 +        iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +        vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +        /* Reset potential sums */
 +        velecsum         = _mm256_setzero_pd();
 +        vvdwsum          = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_add_pd(rinv00,_mm256_mul_pd(krf,rsq00)),crf));
 +            felec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_mul_pd(rinv00,rinvsq00),krf2));
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
 +
 +            /* Inner loop uses 44 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_add_pd(rinv00,_mm256_mul_pd(krf,rsq00)),crf));
 +            felec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_mul_pd(rinv00,rinvsq00),krf2));
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +            vvdw             = _mm256_andnot_pd(dummy_mask,vvdw);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
 +
 +            /* Inner loop uses 44 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_1atom_swizzle_pd(fix0,fiy0,fiz0,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
 +        gmx_mm256_update_1pot_pd(vvdwsum,kernel_data->energygrp_vdw+ggid);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 9 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_VF,outeriter*9 + inneriter*44);
 +}
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecRF_VdwLJ_GeomP1P1_F_avx_256_double
 + * Electrostatics interaction: ReactionField
 + * VdW interaction:            LennardJones
 + * Geometry:                   Particle-Particle
 + * Calculate force/pot:        Force
 + */
 +void
 +nb_kernel_ElecRF_VdwLJ_GeomP1P1_F_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    krf              = _mm256_set1_pd(fr->ic->k_rf);
 +    krf2             = _mm256_set1_pd(fr->ic->k_rf*2.0);
 +    crf              = _mm256_set1_pd(fr->ic->c_rf);
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_1rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +
 +        /* Load parameters for i particles */
 +        iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +        vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_mul_pd(rinv00,rinvsq00),krf2));
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(_mm256_mul_pd(c12_00,rinvsix),c6_00),_mm256_mul_pd(rinvsix,rinvsq00));
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
 +
 +            /* Inner loop uses 34 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_mul_pd(rinv00,rinvsq00),krf2));
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(_mm256_mul_pd(c12_00,rinvsix),c6_00),_mm256_mul_pd(rinvsix,rinvsq00));
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
 +
 +            /* Inner loop uses 34 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_1atom_swizzle_pd(fix0,fiy0,fiz0,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 7 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_F,outeriter*7 + inneriter*34);
 +}
index 26c65e2b3a4ac9e0a52ef6b86a9ebd29e7bd792f,0000000000000000000000000000000000000000..8db375d2ea89a499be138f3891ec4756f2316f06
mode 100644,000000..100644
--- /dev/null
@@@ -1,1022 -1,0 +1,1022 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/*
 + * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2001-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the Free
 + * Software Foundation; either version 2 of the License, or (at your option) any
 + * later version.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website.
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecRF_VdwLJ_GeomW3P1_VF_avx_256_double
 + * Electrostatics interaction: ReactionField
 + * VdW interaction:            LennardJones
 + * Geometry:                   Water3-Particle
 + * Calculate force/pot:        PotentialAndForce
 + */
 +void
 +nb_kernel_ElecRF_VdwLJ_GeomW3P1_VF_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    krf              = _mm256_set1_pd(fr->ic->k_rf);
 +    krf2             = _mm256_set1_pd(fr->ic->k_rf*2.0);
 +    crf              = _mm256_set1_pd(fr->ic->c_rf);
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +
 +        /* Reset potential sums */
 +        velecsum         = _mm256_setzero_pd();
 +        vvdwsum          = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_add_pd(rinv00,_mm256_mul_pd(krf,rsq00)),crf));
 +            felec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_mul_pd(rinv00,rinvsq00),krf2));
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_add_pd(rinv10,_mm256_mul_pd(krf,rsq10)),crf));
 +            felec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_mul_pd(rinv10,rinvsq10),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_add_pd(rinv20,_mm256_mul_pd(krf,rsq20)),crf));
 +            felec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_mul_pd(rinv20,rinvsq20),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 111 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_add_pd(rinv00,_mm256_mul_pd(krf,rsq00)),crf));
 +            felec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_mul_pd(rinv00,rinvsq00),krf2));
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +            vvdw             = _mm256_andnot_pd(dummy_mask,vvdw);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_add_pd(rinv10,_mm256_mul_pd(krf,rsq10)),crf));
 +            felec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_mul_pd(rinv10,rinvsq10),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_add_pd(rinv20,_mm256_mul_pd(krf,rsq20)),crf));
 +            felec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_mul_pd(rinv20,rinvsq20),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 111 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
 +        gmx_mm256_update_1pot_pd(vvdwsum,kernel_data->energygrp_vdw+ggid);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 20 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3_VF,outeriter*20 + inneriter*111);
 +}
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecRF_VdwLJ_GeomW3P1_F_avx_256_double
 + * Electrostatics interaction: ReactionField
 + * VdW interaction:            LennardJones
 + * Geometry:                   Water3-Particle
 + * Calculate force/pot:        Force
 + */
 +void
 +nb_kernel_ElecRF_VdwLJ_GeomW3P1_F_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    krf              = _mm256_set1_pd(fr->ic->k_rf);
 +    krf2             = _mm256_set1_pd(fr->ic->k_rf*2.0);
 +    crf              = _mm256_set1_pd(fr->ic->c_rf);
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_mul_pd(rinv00,rinvsq00),krf2));
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(_mm256_mul_pd(c12_00,rinvsix),c6_00),_mm256_mul_pd(rinvsix,rinvsq00));
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_mul_pd(rinv10,rinvsq10),krf2));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_mul_pd(rinv20,rinvsq20),krf2));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 91 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_mul_pd(rinv00,rinvsq00),krf2));
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(_mm256_mul_pd(c12_00,rinvsix),c6_00),_mm256_mul_pd(rinvsix,rinvsq00));
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_mul_pd(rinv10,rinvsq10),krf2));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_mul_pd(rinv20,rinvsq20),krf2));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 91 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 18 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3_F,outeriter*18 + inneriter*91);
 +}
index 0628af28f06f321971a21ebadae1528d17e87ab5,0000000000000000000000000000000000000000..444fdc956ee1712297466ecf1481e15e537efc4c
mode 100644,000000..100644
--- /dev/null
@@@ -1,1788 -1,0 +1,1788 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/*
 + * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2001-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the Free
 + * Software Foundation; either version 2 of the License, or (at your option) any
 + * later version.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website.
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecRF_VdwLJ_GeomW3W3_VF_avx_256_double
 + * Electrostatics interaction: ReactionField
 + * VdW interaction:            LennardJones
 + * Geometry:                   Water3-Water3
 + * Calculate force/pot:        PotentialAndForce
 + */
 +void
 +nb_kernel_ElecRF_VdwLJ_GeomW3W3_VF_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
 +    __m256d          jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
 +    int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
 +    __m256d          jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
 +    __m256d          dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
 +    __m256d          dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
 +    __m256d          dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    krf              = _mm256_set1_pd(fr->ic->k_rf);
 +    krf2             = _mm256_set1_pd(fr->ic->k_rf*2.0);
 +    crf              = _mm256_set1_pd(fr->ic->c_rf);
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +    jq0              = _mm256_set1_pd(charge[inr+0]);
 +    jq1              = _mm256_set1_pd(charge[inr+1]);
 +    jq2              = _mm256_set1_pd(charge[inr+2]);
 +    vdwjidx0A        = 2*vdwtype[inr+0];
 +    qq00             = _mm256_mul_pd(iq0,jq0);
 +    c6_00            = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A]);
 +    c12_00           = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A+1]);
 +    qq01             = _mm256_mul_pd(iq0,jq1);
 +    qq02             = _mm256_mul_pd(iq0,jq2);
 +    qq10             = _mm256_mul_pd(iq1,jq0);
 +    qq11             = _mm256_mul_pd(iq1,jq1);
 +    qq12             = _mm256_mul_pd(iq1,jq2);
 +    qq20             = _mm256_mul_pd(iq2,jq0);
 +    qq21             = _mm256_mul_pd(iq2,jq1);
 +    qq22             = _mm256_mul_pd(iq2,jq2);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +
 +        /* Reset potential sums */
 +        velecsum         = _mm256_setzero_pd();
 +        vvdwsum          = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                              &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx01             = _mm256_sub_pd(ix0,jx1);
 +            dy01             = _mm256_sub_pd(iy0,jy1);
 +            dz01             = _mm256_sub_pd(iz0,jz1);
 +            dx02             = _mm256_sub_pd(ix0,jx2);
 +            dy02             = _mm256_sub_pd(iy0,jy2);
 +            dz02             = _mm256_sub_pd(iz0,jz2);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
 +            rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv01           = gmx_mm256_invsqrt_pd(rsq01);
 +            rinv02           = gmx_mm256_invsqrt_pd(rsq02);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq01         = _mm256_mul_pd(rinv01,rinv01);
 +            rinvsq02         = _mm256_mul_pd(rinv02,rinv02);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_add_pd(rinv00,_mm256_mul_pd(krf,rsq00)),crf));
 +            felec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_mul_pd(rinv00,rinvsq00),krf2));
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq01,_mm256_sub_pd(_mm256_add_pd(rinv01,_mm256_mul_pd(krf,rsq01)),crf));
 +            felec            = _mm256_mul_pd(qq01,_mm256_sub_pd(_mm256_mul_pd(rinv01,rinvsq01),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx01);
 +            ty               = _mm256_mul_pd(fscal,dy01);
 +            tz               = _mm256_mul_pd(fscal,dz01);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq02,_mm256_sub_pd(_mm256_add_pd(rinv02,_mm256_mul_pd(krf,rsq02)),crf));
 +            felec            = _mm256_mul_pd(qq02,_mm256_sub_pd(_mm256_mul_pd(rinv02,rinvsq02),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx02);
 +            ty               = _mm256_mul_pd(fscal,dy02);
 +            tz               = _mm256_mul_pd(fscal,dz02);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_add_pd(rinv10,_mm256_mul_pd(krf,rsq10)),crf));
 +            felec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_mul_pd(rinv10,rinvsq10),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_add_pd(rinv11,_mm256_mul_pd(krf,rsq11)),crf));
 +            felec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_mul_pd(rinv11,rinvsq11),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_add_pd(rinv12,_mm256_mul_pd(krf,rsq12)),crf));
 +            felec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_mul_pd(rinv12,rinvsq12),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_add_pd(rinv20,_mm256_mul_pd(krf,rsq20)),crf));
 +            felec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_mul_pd(rinv20,rinvsq20),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_add_pd(rinv21,_mm256_mul_pd(krf,rsq21)),crf));
 +            felec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_mul_pd(rinv21,rinvsq21),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_add_pd(rinv22,_mm256_mul_pd(krf,rsq22)),crf));
 +            felec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_mul_pd(rinv22,rinvsq22),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
 +
 +            /* Inner loop uses 300 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                              &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx01             = _mm256_sub_pd(ix0,jx1);
 +            dy01             = _mm256_sub_pd(iy0,jy1);
 +            dz01             = _mm256_sub_pd(iz0,jz1);
 +            dx02             = _mm256_sub_pd(ix0,jx2);
 +            dy02             = _mm256_sub_pd(iy0,jy2);
 +            dz02             = _mm256_sub_pd(iz0,jz2);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
 +            rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv01           = gmx_mm256_invsqrt_pd(rsq01);
 +            rinv02           = gmx_mm256_invsqrt_pd(rsq02);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq01         = _mm256_mul_pd(rinv01,rinv01);
 +            rinvsq02         = _mm256_mul_pd(rinv02,rinv02);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_add_pd(rinv00,_mm256_mul_pd(krf,rsq00)),crf));
 +            felec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_mul_pd(rinv00,rinvsq00),krf2));
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +            vvdw             = _mm256_andnot_pd(dummy_mask,vvdw);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq01,_mm256_sub_pd(_mm256_add_pd(rinv01,_mm256_mul_pd(krf,rsq01)),crf));
 +            felec            = _mm256_mul_pd(qq01,_mm256_sub_pd(_mm256_mul_pd(rinv01,rinvsq01),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx01);
 +            ty               = _mm256_mul_pd(fscal,dy01);
 +            tz               = _mm256_mul_pd(fscal,dz01);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq02,_mm256_sub_pd(_mm256_add_pd(rinv02,_mm256_mul_pd(krf,rsq02)),crf));
 +            felec            = _mm256_mul_pd(qq02,_mm256_sub_pd(_mm256_mul_pd(rinv02,rinvsq02),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx02);
 +            ty               = _mm256_mul_pd(fscal,dy02);
 +            tz               = _mm256_mul_pd(fscal,dz02);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_add_pd(rinv10,_mm256_mul_pd(krf,rsq10)),crf));
 +            felec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_mul_pd(rinv10,rinvsq10),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_add_pd(rinv11,_mm256_mul_pd(krf,rsq11)),crf));
 +            felec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_mul_pd(rinv11,rinvsq11),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_add_pd(rinv12,_mm256_mul_pd(krf,rsq12)),crf));
 +            felec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_mul_pd(rinv12,rinvsq12),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_add_pd(rinv20,_mm256_mul_pd(krf,rsq20)),crf));
 +            felec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_mul_pd(rinv20,rinvsq20),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_add_pd(rinv21,_mm256_mul_pd(krf,rsq21)),crf));
 +            felec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_mul_pd(rinv21,rinvsq21),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_add_pd(rinv22,_mm256_mul_pd(krf,rsq22)),crf));
 +            felec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_mul_pd(rinv22,rinvsq22),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
 +
 +            /* Inner loop uses 300 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
 +        gmx_mm256_update_1pot_pd(vvdwsum,kernel_data->energygrp_vdw+ggid);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 20 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3W3_VF,outeriter*20 + inneriter*300);
 +}
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecRF_VdwLJ_GeomW3W3_F_avx_256_double
 + * Electrostatics interaction: ReactionField
 + * VdW interaction:            LennardJones
 + * Geometry:                   Water3-Water3
 + * Calculate force/pot:        Force
 + */
 +void
 +nb_kernel_ElecRF_VdwLJ_GeomW3W3_F_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
 +    __m256d          jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
 +    int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
 +    __m256d          jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
 +    __m256d          dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
 +    __m256d          dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
 +    __m256d          dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    krf              = _mm256_set1_pd(fr->ic->k_rf);
 +    krf2             = _mm256_set1_pd(fr->ic->k_rf*2.0);
 +    crf              = _mm256_set1_pd(fr->ic->c_rf);
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +    jq0              = _mm256_set1_pd(charge[inr+0]);
 +    jq1              = _mm256_set1_pd(charge[inr+1]);
 +    jq2              = _mm256_set1_pd(charge[inr+2]);
 +    vdwjidx0A        = 2*vdwtype[inr+0];
 +    qq00             = _mm256_mul_pd(iq0,jq0);
 +    c6_00            = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A]);
 +    c12_00           = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A+1]);
 +    qq01             = _mm256_mul_pd(iq0,jq1);
 +    qq02             = _mm256_mul_pd(iq0,jq2);
 +    qq10             = _mm256_mul_pd(iq1,jq0);
 +    qq11             = _mm256_mul_pd(iq1,jq1);
 +    qq12             = _mm256_mul_pd(iq1,jq2);
 +    qq20             = _mm256_mul_pd(iq2,jq0);
 +    qq21             = _mm256_mul_pd(iq2,jq1);
 +    qq22             = _mm256_mul_pd(iq2,jq2);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                              &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx01             = _mm256_sub_pd(ix0,jx1);
 +            dy01             = _mm256_sub_pd(iy0,jy1);
 +            dz01             = _mm256_sub_pd(iz0,jz1);
 +            dx02             = _mm256_sub_pd(ix0,jx2);
 +            dy02             = _mm256_sub_pd(iy0,jy2);
 +            dz02             = _mm256_sub_pd(iz0,jz2);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
 +            rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv01           = gmx_mm256_invsqrt_pd(rsq01);
 +            rinv02           = gmx_mm256_invsqrt_pd(rsq02);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq01         = _mm256_mul_pd(rinv01,rinv01);
 +            rinvsq02         = _mm256_mul_pd(rinv02,rinv02);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_mul_pd(rinv00,rinvsq00),krf2));
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(_mm256_mul_pd(c12_00,rinvsix),c6_00),_mm256_mul_pd(rinvsix,rinvsq00));
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq01,_mm256_sub_pd(_mm256_mul_pd(rinv01,rinvsq01),krf2));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx01);
 +            ty               = _mm256_mul_pd(fscal,dy01);
 +            tz               = _mm256_mul_pd(fscal,dz01);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq02,_mm256_sub_pd(_mm256_mul_pd(rinv02,rinvsq02),krf2));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx02);
 +            ty               = _mm256_mul_pd(fscal,dy02);
 +            tz               = _mm256_mul_pd(fscal,dz02);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_mul_pd(rinv10,rinvsq10),krf2));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_mul_pd(rinv11,rinvsq11),krf2));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_mul_pd(rinv12,rinvsq12),krf2));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_mul_pd(rinv20,rinvsq20),krf2));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_mul_pd(rinv21,rinvsq21),krf2));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_mul_pd(rinv22,rinvsq22),krf2));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
 +
 +            /* Inner loop uses 250 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                              &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx01             = _mm256_sub_pd(ix0,jx1);
 +            dy01             = _mm256_sub_pd(iy0,jy1);
 +            dz01             = _mm256_sub_pd(iz0,jz1);
 +            dx02             = _mm256_sub_pd(ix0,jx2);
 +            dy02             = _mm256_sub_pd(iy0,jy2);
 +            dz02             = _mm256_sub_pd(iz0,jz2);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
 +            rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv01           = gmx_mm256_invsqrt_pd(rsq01);
 +            rinv02           = gmx_mm256_invsqrt_pd(rsq02);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq01         = _mm256_mul_pd(rinv01,rinv01);
 +            rinvsq02         = _mm256_mul_pd(rinv02,rinv02);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_mul_pd(rinv00,rinvsq00),krf2));
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(_mm256_mul_pd(c12_00,rinvsix),c6_00),_mm256_mul_pd(rinvsix,rinvsq00));
 +
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq01,_mm256_sub_pd(_mm256_mul_pd(rinv01,rinvsq01),krf2));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx01);
 +            ty               = _mm256_mul_pd(fscal,dy01);
 +            tz               = _mm256_mul_pd(fscal,dz01);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq02,_mm256_sub_pd(_mm256_mul_pd(rinv02,rinvsq02),krf2));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx02);
 +            ty               = _mm256_mul_pd(fscal,dy02);
 +            tz               = _mm256_mul_pd(fscal,dz02);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_mul_pd(rinv10,rinvsq10),krf2));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_mul_pd(rinv11,rinvsq11),krf2));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_mul_pd(rinv12,rinvsq12),krf2));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_mul_pd(rinv20,rinvsq20),krf2));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_mul_pd(rinv21,rinvsq21),krf2));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_mul_pd(rinv22,rinvsq22),krf2));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
 +
 +            /* Inner loop uses 250 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 18 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3W3_F,outeriter*18 + inneriter*250);
 +}
index b3a18371c293ee3e20b4855cdf018dde3e5ec5ad,0000000000000000000000000000000000000000..6a3c7eaaa02bec9dc7e9f0e2cd60b05e4cc43daa
mode 100644,000000..100644
--- /dev/null
@@@ -1,1150 -1,0 +1,1150 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/*
 + * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2001-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the Free
 + * Software Foundation; either version 2 of the License, or (at your option) any
 + * later version.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website.
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecRF_VdwLJ_GeomW4P1_VF_avx_256_double
 + * Electrostatics interaction: ReactionField
 + * VdW interaction:            LennardJones
 + * Geometry:                   Water4-Particle
 + * Calculate force/pot:        PotentialAndForce
 + */
 +void
 +nb_kernel_ElecRF_VdwLJ_GeomW4P1_VF_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    real *           vdwioffsetptr3;
 +    __m256d          ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          dx30,dy30,dz30,rsq30,rinv30,rinvsq30,r30,qq30,c6_30,c12_30;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    krf              = _mm256_set1_pd(fr->ic->k_rf);
 +    krf2             = _mm256_set1_pd(fr->ic->k_rf*2.0);
 +    crf              = _mm256_set1_pd(fr->ic->c_rf);
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    iq3              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+3]));
 +    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_4rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +        fix3             = _mm256_setzero_pd();
 +        fiy3             = _mm256_setzero_pd();
 +        fiz3             = _mm256_setzero_pd();
 +
 +        /* Reset potential sums */
 +        velecsum         = _mm256_setzero_pd();
 +        vvdwsum          = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx30             = _mm256_sub_pd(ix3,jx0);
 +            dy30             = _mm256_sub_pd(iy3,jy0);
 +            dz30             = _mm256_sub_pd(iz3,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq30            = gmx_mm256_calc_rsq_pd(dx30,dy30,dz30);
 +
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv30           = gmx_mm256_invsqrt_pd(rsq30);
 +
 +            rinvsq00         = gmx_mm256_inv_pd(rsq00);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq30         = _mm256_mul_pd(rinv30,rinv30);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = fvdw;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_add_pd(rinv10,_mm256_mul_pd(krf,rsq10)),crf));
 +            felec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_mul_pd(rinv10,rinvsq10),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_add_pd(rinv20,_mm256_mul_pd(krf,rsq20)),crf));
 +            felec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_mul_pd(rinv20,rinvsq20),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq30             = _mm256_mul_pd(iq3,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq30,_mm256_sub_pd(_mm256_add_pd(rinv30,_mm256_mul_pd(krf,rsq30)),crf));
 +            felec            = _mm256_mul_pd(qq30,_mm256_sub_pd(_mm256_mul_pd(rinv30,rinvsq30),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx30);
 +            ty               = _mm256_mul_pd(fscal,dy30);
 +            tz               = _mm256_mul_pd(fscal,dz30);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 131 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx30             = _mm256_sub_pd(ix3,jx0);
 +            dy30             = _mm256_sub_pd(iy3,jy0);
 +            dz30             = _mm256_sub_pd(iz3,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq30            = gmx_mm256_calc_rsq_pd(dx30,dy30,dz30);
 +
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv30           = gmx_mm256_invsqrt_pd(rsq30);
 +
 +            rinvsq00         = gmx_mm256_inv_pd(rsq00);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq30         = _mm256_mul_pd(rinv30,rinv30);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            vvdw             = _mm256_andnot_pd(dummy_mask,vvdw);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = fvdw;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_add_pd(rinv10,_mm256_mul_pd(krf,rsq10)),crf));
 +            felec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_mul_pd(rinv10,rinvsq10),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_add_pd(rinv20,_mm256_mul_pd(krf,rsq20)),crf));
 +            felec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_mul_pd(rinv20,rinvsq20),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq30             = _mm256_mul_pd(iq3,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq30,_mm256_sub_pd(_mm256_add_pd(rinv30,_mm256_mul_pd(krf,rsq30)),crf));
 +            felec            = _mm256_mul_pd(qq30,_mm256_sub_pd(_mm256_mul_pd(rinv30,rinvsq30),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx30);
 +            ty               = _mm256_mul_pd(fscal,dy30);
 +            tz               = _mm256_mul_pd(fscal,dz30);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 131 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_4atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
 +        gmx_mm256_update_1pot_pd(vvdwsum,kernel_data->energygrp_vdw+ggid);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 26 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4_VF,outeriter*26 + inneriter*131);
 +}
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecRF_VdwLJ_GeomW4P1_F_avx_256_double
 + * Electrostatics interaction: ReactionField
 + * VdW interaction:            LennardJones
 + * Geometry:                   Water4-Particle
 + * Calculate force/pot:        Force
 + */
 +void
 +nb_kernel_ElecRF_VdwLJ_GeomW4P1_F_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    real *           vdwioffsetptr3;
 +    __m256d          ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          dx30,dy30,dz30,rsq30,rinv30,rinvsq30,r30,qq30,c6_30,c12_30;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    krf              = _mm256_set1_pd(fr->ic->k_rf);
 +    krf2             = _mm256_set1_pd(fr->ic->k_rf*2.0);
 +    crf              = _mm256_set1_pd(fr->ic->c_rf);
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    iq3              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+3]));
 +    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_4rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +        fix3             = _mm256_setzero_pd();
 +        fiy3             = _mm256_setzero_pd();
 +        fiz3             = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx30             = _mm256_sub_pd(ix3,jx0);
 +            dy30             = _mm256_sub_pd(iy3,jy0);
 +            dz30             = _mm256_sub_pd(iz3,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq30            = gmx_mm256_calc_rsq_pd(dx30,dy30,dz30);
 +
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv30           = gmx_mm256_invsqrt_pd(rsq30);
 +
 +            rinvsq00         = gmx_mm256_inv_pd(rsq00);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq30         = _mm256_mul_pd(rinv30,rinv30);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(_mm256_mul_pd(c12_00,rinvsix),c6_00),_mm256_mul_pd(rinvsix,rinvsq00));
 +
 +            fscal            = fvdw;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_mul_pd(rinv10,rinvsq10),krf2));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_mul_pd(rinv20,rinvsq20),krf2));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq30             = _mm256_mul_pd(iq3,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq30,_mm256_sub_pd(_mm256_mul_pd(rinv30,rinvsq30),krf2));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx30);
 +            ty               = _mm256_mul_pd(fscal,dy30);
 +            tz               = _mm256_mul_pd(fscal,dz30);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 111 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx30             = _mm256_sub_pd(ix3,jx0);
 +            dy30             = _mm256_sub_pd(iy3,jy0);
 +            dz30             = _mm256_sub_pd(iz3,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq30            = gmx_mm256_calc_rsq_pd(dx30,dy30,dz30);
 +
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv30           = gmx_mm256_invsqrt_pd(rsq30);
 +
 +            rinvsq00         = gmx_mm256_inv_pd(rsq00);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq30         = _mm256_mul_pd(rinv30,rinv30);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +            vdwjidx0A        = 2*vdwtype[jnrA+0];
 +            vdwjidx0B        = 2*vdwtype[jnrB+0];
 +            vdwjidx0C        = 2*vdwtype[jnrC+0];
 +            vdwjidx0D        = 2*vdwtype[jnrD+0];
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
 +                                            vdwioffsetptr0+vdwjidx0B,
 +                                            vdwioffsetptr0+vdwjidx0C,
 +                                            vdwioffsetptr0+vdwjidx0D,
 +                                            &c6_00,&c12_00);
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(_mm256_mul_pd(c12_00,rinvsix),c6_00),_mm256_mul_pd(rinvsix,rinvsq00));
 +
 +            fscal            = fvdw;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_mul_pd(rinv10,rinvsq10),krf2));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_mul_pd(rinv20,rinvsq20),krf2));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq30             = _mm256_mul_pd(iq3,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq30,_mm256_sub_pd(_mm256_mul_pd(rinv30,rinvsq30),krf2));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx30);
 +            ty               = _mm256_mul_pd(fscal,dy30);
 +            tz               = _mm256_mul_pd(fscal,dz30);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 111 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_4atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 24 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4_F,outeriter*24 + inneriter*111);
 +}
index 240e6319bb23576859621ca4f5e53f98268f8ce3,0000000000000000000000000000000000000000..bf6e04679883b780843a14d22659a0342d53a203
mode 100644,000000..100644
--- /dev/null
@@@ -1,1932 -1,0 +1,1932 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/*
 + * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2001-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the Free
 + * Software Foundation; either version 2 of the License, or (at your option) any
 + * later version.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website.
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecRF_VdwLJ_GeomW4W4_VF_avx_256_double
 + * Electrostatics interaction: ReactionField
 + * VdW interaction:            LennardJones
 + * Geometry:                   Water4-Water4
 + * Calculate force/pot:        PotentialAndForce
 + */
 +void
 +nb_kernel_ElecRF_VdwLJ_GeomW4W4_VF_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    real *           vdwioffsetptr3;
 +    __m256d          ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
 +    __m256d          jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
 +    int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
 +    __m256d          jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
 +    int              vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D;
 +    __m256d          jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
 +    __m256d          dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
 +    __m256d          dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
 +    __m256d          dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
 +    __m256d          dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
 +    __m256d          dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
 +    __m256d          dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
 +    __m256d          dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
 +    __m256d          dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    krf              = _mm256_set1_pd(fr->ic->k_rf);
 +    krf2             = _mm256_set1_pd(fr->ic->k_rf*2.0);
 +    crf              = _mm256_set1_pd(fr->ic->c_rf);
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    iq3              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+3]));
 +    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +    jq1              = _mm256_set1_pd(charge[inr+1]);
 +    jq2              = _mm256_set1_pd(charge[inr+2]);
 +    jq3              = _mm256_set1_pd(charge[inr+3]);
 +    vdwjidx0A        = 2*vdwtype[inr+0];
 +    c6_00            = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A]);
 +    c12_00           = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A+1]);
 +    qq11             = _mm256_mul_pd(iq1,jq1);
 +    qq12             = _mm256_mul_pd(iq1,jq2);
 +    qq13             = _mm256_mul_pd(iq1,jq3);
 +    qq21             = _mm256_mul_pd(iq2,jq1);
 +    qq22             = _mm256_mul_pd(iq2,jq2);
 +    qq23             = _mm256_mul_pd(iq2,jq3);
 +    qq31             = _mm256_mul_pd(iq3,jq1);
 +    qq32             = _mm256_mul_pd(iq3,jq2);
 +    qq33             = _mm256_mul_pd(iq3,jq3);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_4rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +        fix3             = _mm256_setzero_pd();
 +        fiy3             = _mm256_setzero_pd();
 +        fiz3             = _mm256_setzero_pd();
 +
 +        /* Reset potential sums */
 +        velecsum         = _mm256_setzero_pd();
 +        vvdwsum          = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_4rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
 +                                                 &jy2,&jz2,&jx3,&jy3,&jz3);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx13             = _mm256_sub_pd(ix1,jx3);
 +            dy13             = _mm256_sub_pd(iy1,jy3);
 +            dz13             = _mm256_sub_pd(iz1,jz3);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +            dx23             = _mm256_sub_pd(ix2,jx3);
 +            dy23             = _mm256_sub_pd(iy2,jy3);
 +            dz23             = _mm256_sub_pd(iz2,jz3);
 +            dx31             = _mm256_sub_pd(ix3,jx1);
 +            dy31             = _mm256_sub_pd(iy3,jy1);
 +            dz31             = _mm256_sub_pd(iz3,jz1);
 +            dx32             = _mm256_sub_pd(ix3,jx2);
 +            dy32             = _mm256_sub_pd(iy3,jy2);
 +            dz32             = _mm256_sub_pd(iz3,jz2);
 +            dx33             = _mm256_sub_pd(ix3,jx3);
 +            dy33             = _mm256_sub_pd(iy3,jy3);
 +            dz33             = _mm256_sub_pd(iz3,jz3);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq13            = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +            rsq23            = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
 +            rsq31            = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
 +            rsq32            = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
 +            rsq33            = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);
 +
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv13           = gmx_mm256_invsqrt_pd(rsq13);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +            rinv23           = gmx_mm256_invsqrt_pd(rsq23);
 +            rinv31           = gmx_mm256_invsqrt_pd(rsq31);
 +            rinv32           = gmx_mm256_invsqrt_pd(rsq32);
 +            rinv33           = gmx_mm256_invsqrt_pd(rsq33);
 +
 +            rinvsq00         = gmx_mm256_inv_pd(rsq00);
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq13         = _mm256_mul_pd(rinv13,rinv13);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +            rinvsq23         = _mm256_mul_pd(rinv23,rinv23);
 +            rinvsq31         = _mm256_mul_pd(rinv31,rinv31);
 +            rinvsq32         = _mm256_mul_pd(rinv32,rinv32);
 +            rinvsq33         = _mm256_mul_pd(rinv33,rinv33);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +            fjx3             = _mm256_setzero_pd();
 +            fjy3             = _mm256_setzero_pd();
 +            fjz3             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = fvdw;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_add_pd(rinv11,_mm256_mul_pd(krf,rsq11)),crf));
 +            felec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_mul_pd(rinv11,rinvsq11),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_add_pd(rinv12,_mm256_mul_pd(krf,rsq12)),crf));
 +            felec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_mul_pd(rinv12,rinvsq12),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq13,_mm256_sub_pd(_mm256_add_pd(rinv13,_mm256_mul_pd(krf,rsq13)),crf));
 +            felec            = _mm256_mul_pd(qq13,_mm256_sub_pd(_mm256_mul_pd(rinv13,rinvsq13),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx13);
 +            ty               = _mm256_mul_pd(fscal,dy13);
 +            tz               = _mm256_mul_pd(fscal,dz13);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_add_pd(rinv21,_mm256_mul_pd(krf,rsq21)),crf));
 +            felec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_mul_pd(rinv21,rinvsq21),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_add_pd(rinv22,_mm256_mul_pd(krf,rsq22)),crf));
 +            felec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_mul_pd(rinv22,rinvsq22),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq23,_mm256_sub_pd(_mm256_add_pd(rinv23,_mm256_mul_pd(krf,rsq23)),crf));
 +            felec            = _mm256_mul_pd(qq23,_mm256_sub_pd(_mm256_mul_pd(rinv23,rinvsq23),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx23);
 +            ty               = _mm256_mul_pd(fscal,dy23);
 +            tz               = _mm256_mul_pd(fscal,dz23);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq31,_mm256_sub_pd(_mm256_add_pd(rinv31,_mm256_mul_pd(krf,rsq31)),crf));
 +            felec            = _mm256_mul_pd(qq31,_mm256_sub_pd(_mm256_mul_pd(rinv31,rinvsq31),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx31);
 +            ty               = _mm256_mul_pd(fscal,dy31);
 +            tz               = _mm256_mul_pd(fscal,dz31);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq32,_mm256_sub_pd(_mm256_add_pd(rinv32,_mm256_mul_pd(krf,rsq32)),crf));
 +            felec            = _mm256_mul_pd(qq32,_mm256_sub_pd(_mm256_mul_pd(rinv32,rinvsq32),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx32);
 +            ty               = _mm256_mul_pd(fscal,dy32);
 +            tz               = _mm256_mul_pd(fscal,dz32);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq33,_mm256_sub_pd(_mm256_add_pd(rinv33,_mm256_mul_pd(krf,rsq33)),crf));
 +            felec            = _mm256_mul_pd(qq33,_mm256_sub_pd(_mm256_mul_pd(rinv33,rinvsq33),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx33);
 +            ty               = _mm256_mul_pd(fscal,dy33);
 +            tz               = _mm256_mul_pd(fscal,dz33);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_4rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
 +                                                      fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
 +
 +            /* Inner loop uses 323 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_4rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
 +                                                 &jy2,&jz2,&jx3,&jy3,&jz3);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx13             = _mm256_sub_pd(ix1,jx3);
 +            dy13             = _mm256_sub_pd(iy1,jy3);
 +            dz13             = _mm256_sub_pd(iz1,jz3);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +            dx23             = _mm256_sub_pd(ix2,jx3);
 +            dy23             = _mm256_sub_pd(iy2,jy3);
 +            dz23             = _mm256_sub_pd(iz2,jz3);
 +            dx31             = _mm256_sub_pd(ix3,jx1);
 +            dy31             = _mm256_sub_pd(iy3,jy1);
 +            dz31             = _mm256_sub_pd(iz3,jz1);
 +            dx32             = _mm256_sub_pd(ix3,jx2);
 +            dy32             = _mm256_sub_pd(iy3,jy2);
 +            dz32             = _mm256_sub_pd(iz3,jz2);
 +            dx33             = _mm256_sub_pd(ix3,jx3);
 +            dy33             = _mm256_sub_pd(iy3,jy3);
 +            dz33             = _mm256_sub_pd(iz3,jz3);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq13            = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +            rsq23            = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
 +            rsq31            = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
 +            rsq32            = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
 +            rsq33            = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);
 +
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv13           = gmx_mm256_invsqrt_pd(rsq13);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +            rinv23           = gmx_mm256_invsqrt_pd(rsq23);
 +            rinv31           = gmx_mm256_invsqrt_pd(rsq31);
 +            rinv32           = gmx_mm256_invsqrt_pd(rsq32);
 +            rinv33           = gmx_mm256_invsqrt_pd(rsq33);
 +
 +            rinvsq00         = gmx_mm256_inv_pd(rsq00);
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq13         = _mm256_mul_pd(rinv13,rinv13);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +            rinvsq23         = _mm256_mul_pd(rinv23,rinv23);
 +            rinvsq31         = _mm256_mul_pd(rinv31,rinv31);
 +            rinvsq32         = _mm256_mul_pd(rinv32,rinv32);
 +            rinvsq33         = _mm256_mul_pd(rinv33,rinv33);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +            fjx3             = _mm256_setzero_pd();
 +            fjy3             = _mm256_setzero_pd();
 +            fjz3             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
 +            vvdw             = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            vvdw             = _mm256_andnot_pd(dummy_mask,vvdw);
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +
 +            fscal            = fvdw;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_add_pd(rinv11,_mm256_mul_pd(krf,rsq11)),crf));
 +            felec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_mul_pd(rinv11,rinvsq11),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_add_pd(rinv12,_mm256_mul_pd(krf,rsq12)),crf));
 +            felec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_mul_pd(rinv12,rinvsq12),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq13,_mm256_sub_pd(_mm256_add_pd(rinv13,_mm256_mul_pd(krf,rsq13)),crf));
 +            felec            = _mm256_mul_pd(qq13,_mm256_sub_pd(_mm256_mul_pd(rinv13,rinvsq13),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx13);
 +            ty               = _mm256_mul_pd(fscal,dy13);
 +            tz               = _mm256_mul_pd(fscal,dz13);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_add_pd(rinv21,_mm256_mul_pd(krf,rsq21)),crf));
 +            felec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_mul_pd(rinv21,rinvsq21),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_add_pd(rinv22,_mm256_mul_pd(krf,rsq22)),crf));
 +            felec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_mul_pd(rinv22,rinvsq22),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq23,_mm256_sub_pd(_mm256_add_pd(rinv23,_mm256_mul_pd(krf,rsq23)),crf));
 +            felec            = _mm256_mul_pd(qq23,_mm256_sub_pd(_mm256_mul_pd(rinv23,rinvsq23),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx23);
 +            ty               = _mm256_mul_pd(fscal,dy23);
 +            tz               = _mm256_mul_pd(fscal,dz23);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq31,_mm256_sub_pd(_mm256_add_pd(rinv31,_mm256_mul_pd(krf,rsq31)),crf));
 +            felec            = _mm256_mul_pd(qq31,_mm256_sub_pd(_mm256_mul_pd(rinv31,rinvsq31),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx31);
 +            ty               = _mm256_mul_pd(fscal,dy31);
 +            tz               = _mm256_mul_pd(fscal,dz31);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq32,_mm256_sub_pd(_mm256_add_pd(rinv32,_mm256_mul_pd(krf,rsq32)),crf));
 +            felec            = _mm256_mul_pd(qq32,_mm256_sub_pd(_mm256_mul_pd(rinv32,rinvsq32),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx32);
 +            ty               = _mm256_mul_pd(fscal,dy32);
 +            tz               = _mm256_mul_pd(fscal,dz32);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq33,_mm256_sub_pd(_mm256_add_pd(rinv33,_mm256_mul_pd(krf,rsq33)),crf));
 +            felec            = _mm256_mul_pd(qq33,_mm256_sub_pd(_mm256_mul_pd(rinv33,rinvsq33),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx33);
 +            ty               = _mm256_mul_pd(fscal,dy33);
 +            tz               = _mm256_mul_pd(fscal,dz33);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_4rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
 +                                                      fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
 +
 +            /* Inner loop uses 323 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_4atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
 +        gmx_mm256_update_1pot_pd(vvdwsum,kernel_data->energygrp_vdw+ggid);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 26 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4W4_VF,outeriter*26 + inneriter*323);
 +}
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecRF_VdwLJ_GeomW4W4_F_avx_256_double
 + * Electrostatics interaction: ReactionField
 + * VdW interaction:            LennardJones
 + * Geometry:                   Water4-Water4
 + * Calculate force/pot:        Force
 + */
 +void
 +nb_kernel_ElecRF_VdwLJ_GeomW4W4_F_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    real *           vdwioffsetptr3;
 +    __m256d          ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
 +    __m256d          jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
 +    int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
 +    __m256d          jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
 +    int              vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D;
 +    __m256d          jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
 +    __m256d          dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
 +    __m256d          dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
 +    __m256d          dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
 +    __m256d          dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
 +    __m256d          dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
 +    __m256d          dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
 +    __m256d          dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
 +    __m256d          dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    krf              = _mm256_set1_pd(fr->ic->k_rf);
 +    krf2             = _mm256_set1_pd(fr->ic->k_rf*2.0);
 +    crf              = _mm256_set1_pd(fr->ic->c_rf);
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    iq3              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+3]));
 +    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
 +
 +    jq1              = _mm256_set1_pd(charge[inr+1]);
 +    jq2              = _mm256_set1_pd(charge[inr+2]);
 +    jq3              = _mm256_set1_pd(charge[inr+3]);
 +    vdwjidx0A        = 2*vdwtype[inr+0];
 +    c6_00            = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A]);
 +    c12_00           = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A+1]);
 +    qq11             = _mm256_mul_pd(iq1,jq1);
 +    qq12             = _mm256_mul_pd(iq1,jq2);
 +    qq13             = _mm256_mul_pd(iq1,jq3);
 +    qq21             = _mm256_mul_pd(iq2,jq1);
 +    qq22             = _mm256_mul_pd(iq2,jq2);
 +    qq23             = _mm256_mul_pd(iq2,jq3);
 +    qq31             = _mm256_mul_pd(iq3,jq1);
 +    qq32             = _mm256_mul_pd(iq3,jq2);
 +    qq33             = _mm256_mul_pd(iq3,jq3);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_4rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +        fix3             = _mm256_setzero_pd();
 +        fiy3             = _mm256_setzero_pd();
 +        fiz3             = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_4rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
 +                                                 &jy2,&jz2,&jx3,&jy3,&jz3);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx13             = _mm256_sub_pd(ix1,jx3);
 +            dy13             = _mm256_sub_pd(iy1,jy3);
 +            dz13             = _mm256_sub_pd(iz1,jz3);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +            dx23             = _mm256_sub_pd(ix2,jx3);
 +            dy23             = _mm256_sub_pd(iy2,jy3);
 +            dz23             = _mm256_sub_pd(iz2,jz3);
 +            dx31             = _mm256_sub_pd(ix3,jx1);
 +            dy31             = _mm256_sub_pd(iy3,jy1);
 +            dz31             = _mm256_sub_pd(iz3,jz1);
 +            dx32             = _mm256_sub_pd(ix3,jx2);
 +            dy32             = _mm256_sub_pd(iy3,jy2);
 +            dz32             = _mm256_sub_pd(iz3,jz2);
 +            dx33             = _mm256_sub_pd(ix3,jx3);
 +            dy33             = _mm256_sub_pd(iy3,jy3);
 +            dz33             = _mm256_sub_pd(iz3,jz3);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq13            = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +            rsq23            = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
 +            rsq31            = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
 +            rsq32            = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
 +            rsq33            = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);
 +
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv13           = gmx_mm256_invsqrt_pd(rsq13);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +            rinv23           = gmx_mm256_invsqrt_pd(rsq23);
 +            rinv31           = gmx_mm256_invsqrt_pd(rsq31);
 +            rinv32           = gmx_mm256_invsqrt_pd(rsq32);
 +            rinv33           = gmx_mm256_invsqrt_pd(rsq33);
 +
 +            rinvsq00         = gmx_mm256_inv_pd(rsq00);
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq13         = _mm256_mul_pd(rinv13,rinv13);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +            rinvsq23         = _mm256_mul_pd(rinv23,rinv23);
 +            rinvsq31         = _mm256_mul_pd(rinv31,rinv31);
 +            rinvsq32         = _mm256_mul_pd(rinv32,rinv32);
 +            rinvsq33         = _mm256_mul_pd(rinv33,rinv33);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +            fjx3             = _mm256_setzero_pd();
 +            fjy3             = _mm256_setzero_pd();
 +            fjz3             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(_mm256_mul_pd(c12_00,rinvsix),c6_00),_mm256_mul_pd(rinvsix,rinvsq00));
 +
 +            fscal            = fvdw;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_mul_pd(rinv11,rinvsq11),krf2));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_mul_pd(rinv12,rinvsq12),krf2));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq13,_mm256_sub_pd(_mm256_mul_pd(rinv13,rinvsq13),krf2));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx13);
 +            ty               = _mm256_mul_pd(fscal,dy13);
 +            tz               = _mm256_mul_pd(fscal,dz13);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_mul_pd(rinv21,rinvsq21),krf2));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_mul_pd(rinv22,rinvsq22),krf2));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq23,_mm256_sub_pd(_mm256_mul_pd(rinv23,rinvsq23),krf2));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx23);
 +            ty               = _mm256_mul_pd(fscal,dy23);
 +            tz               = _mm256_mul_pd(fscal,dz23);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq31,_mm256_sub_pd(_mm256_mul_pd(rinv31,rinvsq31),krf2));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx31);
 +            ty               = _mm256_mul_pd(fscal,dy31);
 +            tz               = _mm256_mul_pd(fscal,dz31);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq32,_mm256_sub_pd(_mm256_mul_pd(rinv32,rinvsq32),krf2));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx32);
 +            ty               = _mm256_mul_pd(fscal,dy32);
 +            tz               = _mm256_mul_pd(fscal,dz32);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq33,_mm256_sub_pd(_mm256_mul_pd(rinv33,rinvsq33),krf2));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx33);
 +            ty               = _mm256_mul_pd(fscal,dy33);
 +            tz               = _mm256_mul_pd(fscal,dz33);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_4rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
 +                                                      fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
 +
 +            /* Inner loop uses 273 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_4rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
 +                                                 &jy2,&jz2,&jx3,&jy3,&jz3);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx13             = _mm256_sub_pd(ix1,jx3);
 +            dy13             = _mm256_sub_pd(iy1,jy3);
 +            dz13             = _mm256_sub_pd(iz1,jz3);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +            dx23             = _mm256_sub_pd(ix2,jx3);
 +            dy23             = _mm256_sub_pd(iy2,jy3);
 +            dz23             = _mm256_sub_pd(iz2,jz3);
 +            dx31             = _mm256_sub_pd(ix3,jx1);
 +            dy31             = _mm256_sub_pd(iy3,jy1);
 +            dz31             = _mm256_sub_pd(iz3,jz1);
 +            dx32             = _mm256_sub_pd(ix3,jx2);
 +            dy32             = _mm256_sub_pd(iy3,jy2);
 +            dz32             = _mm256_sub_pd(iz3,jz2);
 +            dx33             = _mm256_sub_pd(ix3,jx3);
 +            dy33             = _mm256_sub_pd(iy3,jy3);
 +            dz33             = _mm256_sub_pd(iz3,jz3);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq13            = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +            rsq23            = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
 +            rsq31            = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
 +            rsq32            = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
 +            rsq33            = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);
 +
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv13           = gmx_mm256_invsqrt_pd(rsq13);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +            rinv23           = gmx_mm256_invsqrt_pd(rsq23);
 +            rinv31           = gmx_mm256_invsqrt_pd(rsq31);
 +            rinv32           = gmx_mm256_invsqrt_pd(rsq32);
 +            rinv33           = gmx_mm256_invsqrt_pd(rsq33);
 +
 +            rinvsq00         = gmx_mm256_inv_pd(rsq00);
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq13         = _mm256_mul_pd(rinv13,rinv13);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +            rinvsq23         = _mm256_mul_pd(rinv23,rinv23);
 +            rinvsq31         = _mm256_mul_pd(rinv31,rinv31);
 +            rinvsq32         = _mm256_mul_pd(rinv32,rinv32);
 +            rinvsq33         = _mm256_mul_pd(rinv33,rinv33);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +            fjx3             = _mm256_setzero_pd();
 +            fjy3             = _mm256_setzero_pd();
 +            fjz3             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(_mm256_mul_pd(c12_00,rinvsix),c6_00),_mm256_mul_pd(rinvsix,rinvsq00));
 +
 +            fscal            = fvdw;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_mul_pd(rinv11,rinvsq11),krf2));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_mul_pd(rinv12,rinvsq12),krf2));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq13,_mm256_sub_pd(_mm256_mul_pd(rinv13,rinvsq13),krf2));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx13);
 +            ty               = _mm256_mul_pd(fscal,dy13);
 +            tz               = _mm256_mul_pd(fscal,dz13);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_mul_pd(rinv21,rinvsq21),krf2));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_mul_pd(rinv22,rinvsq22),krf2));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq23,_mm256_sub_pd(_mm256_mul_pd(rinv23,rinvsq23),krf2));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx23);
 +            ty               = _mm256_mul_pd(fscal,dy23);
 +            tz               = _mm256_mul_pd(fscal,dz23);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq31,_mm256_sub_pd(_mm256_mul_pd(rinv31,rinvsq31),krf2));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx31);
 +            ty               = _mm256_mul_pd(fscal,dy31);
 +            tz               = _mm256_mul_pd(fscal,dz31);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq32,_mm256_sub_pd(_mm256_mul_pd(rinv32,rinvsq32),krf2));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx32);
 +            ty               = _mm256_mul_pd(fscal,dy32);
 +            tz               = _mm256_mul_pd(fscal,dz32);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq33,_mm256_sub_pd(_mm256_mul_pd(rinv33,rinvsq33),krf2));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx33);
 +            ty               = _mm256_mul_pd(fscal,dy33);
 +            tz               = _mm256_mul_pd(fscal,dz33);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_4rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
 +                                                      fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
 +
 +            /* Inner loop uses 273 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_4atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 24 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4W4_F,outeriter*24 + inneriter*273);
 +}
index fde9f635b9bf7482336f58175faf4e636bcf4222,0000000000000000000000000000000000000000..37ae1aa703d3c19b3303faf12c1a8af2b2403d07
mode 100644,000000..100644
--- /dev/null
@@@ -1,585 -1,0 +1,585 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/*
 + * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2001-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the Free
 + * Software Foundation; either version 2 of the License, or (at your option) any
 + * later version.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website.
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecRF_VdwNone_GeomP1P1_VF_avx_256_double
 + * Electrostatics interaction: ReactionField
 + * VdW interaction:            None
 + * Geometry:                   Particle-Particle
 + * Calculate force/pot:        PotentialAndForce
 + */
 +void
 +nb_kernel_ElecRF_VdwNone_GeomP1P1_VF_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    krf              = _mm256_set1_pd(fr->ic->k_rf);
 +    krf2             = _mm256_set1_pd(fr->ic->k_rf*2.0);
 +    crf              = _mm256_set1_pd(fr->ic->c_rf);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_1rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +
 +        /* Load parameters for i particles */
 +        iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +
 +        /* Reset potential sums */
 +        velecsum         = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_add_pd(rinv00,_mm256_mul_pd(krf,rsq00)),crf));
 +            felec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_mul_pd(rinv00,rinvsq00),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
 +
 +            /* Inner loop uses 32 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_add_pd(rinv00,_mm256_mul_pd(krf,rsq00)),crf));
 +            felec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_mul_pd(rinv00,rinvsq00),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
 +
 +            /* Inner loop uses 32 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_1atom_swizzle_pd(fix0,fiy0,fiz0,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 8 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VF,outeriter*8 + inneriter*32);
 +}
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecRF_VdwNone_GeomP1P1_F_avx_256_double
 + * Electrostatics interaction: ReactionField
 + * VdW interaction:            None
 + * Geometry:                   Particle-Particle
 + * Calculate force/pot:        Force
 + */
 +void
 +nb_kernel_ElecRF_VdwNone_GeomP1P1_F_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    krf              = _mm256_set1_pd(fr->ic->k_rf);
 +    krf2             = _mm256_set1_pd(fr->ic->k_rf*2.0);
 +    crf              = _mm256_set1_pd(fr->ic->c_rf);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_1rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +
 +        /* Load parameters for i particles */
 +        iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_mul_pd(rinv00,rinvsq00),krf2));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
 +
 +            /* Inner loop uses 27 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_mul_pd(rinv00,rinvsq00),krf2));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
 +
 +            /* Inner loop uses 27 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_1atom_swizzle_pd(fix0,fiy0,fiz0,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 7 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_F,outeriter*7 + inneriter*27);
 +}
index 188ae83b91e8833edf6c34082c0d71b1701748bf,0000000000000000000000000000000000000000..9b99483ca29d46dd95e0f66e4e15ddf358bb5210
mode 100644,000000..100644
--- /dev/null
@@@ -1,935 -1,0 +1,935 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/*
 + * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2001-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the Free
 + * Software Foundation; either version 2 of the License, or (at your option) any
 + * later version.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website.
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecRF_VdwNone_GeomW3P1_VF_avx_256_double
 + * Electrostatics interaction: ReactionField
 + * VdW interaction:            None
 + * Geometry:                   Water3-Particle
 + * Calculate force/pot:        PotentialAndForce
 + */
 +void
 +nb_kernel_ElecRF_VdwNone_GeomW3P1_VF_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    krf              = _mm256_set1_pd(fr->ic->k_rf);
 +    krf2             = _mm256_set1_pd(fr->ic->k_rf*2.0);
 +    crf              = _mm256_set1_pd(fr->ic->c_rf);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +
 +        /* Reset potential sums */
 +        velecsum         = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_add_pd(rinv00,_mm256_mul_pd(krf,rsq00)),crf));
 +            felec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_mul_pd(rinv00,rinvsq00),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_add_pd(rinv10,_mm256_mul_pd(krf,rsq10)),crf));
 +            felec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_mul_pd(rinv10,rinvsq10),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_add_pd(rinv20,_mm256_mul_pd(krf,rsq20)),crf));
 +            felec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_mul_pd(rinv20,rinvsq20),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 99 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_add_pd(rinv00,_mm256_mul_pd(krf,rsq00)),crf));
 +            felec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_mul_pd(rinv00,rinvsq00),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_add_pd(rinv10,_mm256_mul_pd(krf,rsq10)),crf));
 +            felec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_mul_pd(rinv10,rinvsq10),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_add_pd(rinv20,_mm256_mul_pd(krf,rsq20)),crf));
 +            felec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_mul_pd(rinv20,rinvsq20),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 99 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 19 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W3_VF,outeriter*19 + inneriter*99);
 +}
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecRF_VdwNone_GeomW3P1_F_avx_256_double
 + * Electrostatics interaction: ReactionField
 + * VdW interaction:            None
 + * Geometry:                   Water3-Particle
 + * Calculate force/pot:        Force
 + */
 +void
 +nb_kernel_ElecRF_VdwNone_GeomW3P1_F_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    krf              = _mm256_set1_pd(fr->ic->k_rf);
 +    krf2             = _mm256_set1_pd(fr->ic->k_rf*2.0);
 +    crf              = _mm256_set1_pd(fr->ic->c_rf);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_mul_pd(rinv00,rinvsq00),krf2));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_mul_pd(rinv10,rinvsq10),krf2));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_mul_pd(rinv20,rinvsq20),krf2));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 84 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq00             = _mm256_mul_pd(iq0,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_mul_pd(rinv00,rinvsq00),krf2));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_mul_pd(rinv10,rinvsq10),krf2));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_mul_pd(rinv20,rinvsq20),krf2));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 84 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 18 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W3_F,outeriter*18 + inneriter*84);
 +}
index c8028edda0b5aae8a127e12e857ff50ac9ad1638,0000000000000000000000000000000000000000..2c3a8b8f08d47562bcfb45a88f0fb4a87b61becf
mode 100644,000000..100644
--- /dev/null
@@@ -1,1731 -1,0 +1,1731 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/*
 + * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2001-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the Free
 + * Software Foundation; either version 2 of the License, or (at your option) any
 + * later version.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website.
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecRF_VdwNone_GeomW3W3_VF_avx_256_double
 + * Electrostatics interaction: ReactionField
 + * VdW interaction:            None
 + * Geometry:                   Water3-Water3
 + * Calculate force/pot:        PotentialAndForce
 + */
 +void
 +nb_kernel_ElecRF_VdwNone_GeomW3W3_VF_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
 +    __m256d          jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
 +    int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
 +    __m256d          jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
 +    __m256d          dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
 +    __m256d          dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
 +    __m256d          dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    krf              = _mm256_set1_pd(fr->ic->k_rf);
 +    krf2             = _mm256_set1_pd(fr->ic->k_rf*2.0);
 +    crf              = _mm256_set1_pd(fr->ic->c_rf);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +
 +    jq0              = _mm256_set1_pd(charge[inr+0]);
 +    jq1              = _mm256_set1_pd(charge[inr+1]);
 +    jq2              = _mm256_set1_pd(charge[inr+2]);
 +    qq00             = _mm256_mul_pd(iq0,jq0);
 +    qq01             = _mm256_mul_pd(iq0,jq1);
 +    qq02             = _mm256_mul_pd(iq0,jq2);
 +    qq10             = _mm256_mul_pd(iq1,jq0);
 +    qq11             = _mm256_mul_pd(iq1,jq1);
 +    qq12             = _mm256_mul_pd(iq1,jq2);
 +    qq20             = _mm256_mul_pd(iq2,jq0);
 +    qq21             = _mm256_mul_pd(iq2,jq1);
 +    qq22             = _mm256_mul_pd(iq2,jq2);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +
 +        /* Reset potential sums */
 +        velecsum         = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                              &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx01             = _mm256_sub_pd(ix0,jx1);
 +            dy01             = _mm256_sub_pd(iy0,jy1);
 +            dz01             = _mm256_sub_pd(iz0,jz1);
 +            dx02             = _mm256_sub_pd(ix0,jx2);
 +            dy02             = _mm256_sub_pd(iy0,jy2);
 +            dz02             = _mm256_sub_pd(iz0,jz2);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
 +            rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv01           = gmx_mm256_invsqrt_pd(rsq01);
 +            rinv02           = gmx_mm256_invsqrt_pd(rsq02);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq01         = _mm256_mul_pd(rinv01,rinv01);
 +            rinvsq02         = _mm256_mul_pd(rinv02,rinv02);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_add_pd(rinv00,_mm256_mul_pd(krf,rsq00)),crf));
 +            felec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_mul_pd(rinv00,rinvsq00),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq01,_mm256_sub_pd(_mm256_add_pd(rinv01,_mm256_mul_pd(krf,rsq01)),crf));
 +            felec            = _mm256_mul_pd(qq01,_mm256_sub_pd(_mm256_mul_pd(rinv01,rinvsq01),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx01);
 +            ty               = _mm256_mul_pd(fscal,dy01);
 +            tz               = _mm256_mul_pd(fscal,dz01);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq02,_mm256_sub_pd(_mm256_add_pd(rinv02,_mm256_mul_pd(krf,rsq02)),crf));
 +            felec            = _mm256_mul_pd(qq02,_mm256_sub_pd(_mm256_mul_pd(rinv02,rinvsq02),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx02);
 +            ty               = _mm256_mul_pd(fscal,dy02);
 +            tz               = _mm256_mul_pd(fscal,dz02);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_add_pd(rinv10,_mm256_mul_pd(krf,rsq10)),crf));
 +            felec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_mul_pd(rinv10,rinvsq10),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_add_pd(rinv11,_mm256_mul_pd(krf,rsq11)),crf));
 +            felec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_mul_pd(rinv11,rinvsq11),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_add_pd(rinv12,_mm256_mul_pd(krf,rsq12)),crf));
 +            felec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_mul_pd(rinv12,rinvsq12),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_add_pd(rinv20,_mm256_mul_pd(krf,rsq20)),crf));
 +            felec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_mul_pd(rinv20,rinvsq20),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_add_pd(rinv21,_mm256_mul_pd(krf,rsq21)),crf));
 +            felec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_mul_pd(rinv21,rinvsq21),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_add_pd(rinv22,_mm256_mul_pd(krf,rsq22)),crf));
 +            felec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_mul_pd(rinv22,rinvsq22),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
 +
 +            /* Inner loop uses 288 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                              &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx01             = _mm256_sub_pd(ix0,jx1);
 +            dy01             = _mm256_sub_pd(iy0,jy1);
 +            dz01             = _mm256_sub_pd(iz0,jz1);
 +            dx02             = _mm256_sub_pd(ix0,jx2);
 +            dy02             = _mm256_sub_pd(iy0,jy2);
 +            dz02             = _mm256_sub_pd(iz0,jz2);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
 +            rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv01           = gmx_mm256_invsqrt_pd(rsq01);
 +            rinv02           = gmx_mm256_invsqrt_pd(rsq02);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq01         = _mm256_mul_pd(rinv01,rinv01);
 +            rinvsq02         = _mm256_mul_pd(rinv02,rinv02);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_add_pd(rinv00,_mm256_mul_pd(krf,rsq00)),crf));
 +            felec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_mul_pd(rinv00,rinvsq00),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq01,_mm256_sub_pd(_mm256_add_pd(rinv01,_mm256_mul_pd(krf,rsq01)),crf));
 +            felec            = _mm256_mul_pd(qq01,_mm256_sub_pd(_mm256_mul_pd(rinv01,rinvsq01),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx01);
 +            ty               = _mm256_mul_pd(fscal,dy01);
 +            tz               = _mm256_mul_pd(fscal,dz01);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq02,_mm256_sub_pd(_mm256_add_pd(rinv02,_mm256_mul_pd(krf,rsq02)),crf));
 +            felec            = _mm256_mul_pd(qq02,_mm256_sub_pd(_mm256_mul_pd(rinv02,rinvsq02),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx02);
 +            ty               = _mm256_mul_pd(fscal,dy02);
 +            tz               = _mm256_mul_pd(fscal,dz02);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_add_pd(rinv10,_mm256_mul_pd(krf,rsq10)),crf));
 +            felec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_mul_pd(rinv10,rinvsq10),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_add_pd(rinv11,_mm256_mul_pd(krf,rsq11)),crf));
 +            felec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_mul_pd(rinv11,rinvsq11),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_add_pd(rinv12,_mm256_mul_pd(krf,rsq12)),crf));
 +            felec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_mul_pd(rinv12,rinvsq12),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_add_pd(rinv20,_mm256_mul_pd(krf,rsq20)),crf));
 +            felec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_mul_pd(rinv20,rinvsq20),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_add_pd(rinv21,_mm256_mul_pd(krf,rsq21)),crf));
 +            felec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_mul_pd(rinv21,rinvsq21),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_add_pd(rinv22,_mm256_mul_pd(krf,rsq22)),crf));
 +            felec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_mul_pd(rinv22,rinvsq22),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
 +
 +            /* Inner loop uses 288 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 19 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W3W3_VF,outeriter*19 + inneriter*288);
 +}
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecRF_VdwNone_GeomW3W3_F_avx_256_double
 + * Electrostatics interaction: ReactionField
 + * VdW interaction:            None
 + * Geometry:                   Water3-Water3
 + * Calculate force/pot:        Force
 + */
 +void
 +nb_kernel_ElecRF_VdwNone_GeomW3W3_F_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr0;
 +    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
 +    __m256d          jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
 +    int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
 +    __m256d          jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
 +    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
 +    __m256d          dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
 +    __m256d          dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
 +    __m256d          dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
 +    __m256d          dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    krf              = _mm256_set1_pd(fr->ic->k_rf);
 +    krf2             = _mm256_set1_pd(fr->ic->k_rf*2.0);
 +    crf              = _mm256_set1_pd(fr->ic->c_rf);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +
 +    jq0              = _mm256_set1_pd(charge[inr+0]);
 +    jq1              = _mm256_set1_pd(charge[inr+1]);
 +    jq2              = _mm256_set1_pd(charge[inr+2]);
 +    qq00             = _mm256_mul_pd(iq0,jq0);
 +    qq01             = _mm256_mul_pd(iq0,jq1);
 +    qq02             = _mm256_mul_pd(iq0,jq2);
 +    qq10             = _mm256_mul_pd(iq1,jq0);
 +    qq11             = _mm256_mul_pd(iq1,jq1);
 +    qq12             = _mm256_mul_pd(iq1,jq2);
 +    qq20             = _mm256_mul_pd(iq2,jq0);
 +    qq21             = _mm256_mul_pd(iq2,jq1);
 +    qq22             = _mm256_mul_pd(iq2,jq2);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
 +
 +        fix0             = _mm256_setzero_pd();
 +        fiy0             = _mm256_setzero_pd();
 +        fiz0             = _mm256_setzero_pd();
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                              &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx01             = _mm256_sub_pd(ix0,jx1);
 +            dy01             = _mm256_sub_pd(iy0,jy1);
 +            dz01             = _mm256_sub_pd(iz0,jz1);
 +            dx02             = _mm256_sub_pd(ix0,jx2);
 +            dy02             = _mm256_sub_pd(iy0,jy2);
 +            dz02             = _mm256_sub_pd(iz0,jz2);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
 +            rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv01           = gmx_mm256_invsqrt_pd(rsq01);
 +            rinv02           = gmx_mm256_invsqrt_pd(rsq02);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq01         = _mm256_mul_pd(rinv01,rinv01);
 +            rinvsq02         = _mm256_mul_pd(rinv02,rinv02);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_mul_pd(rinv00,rinvsq00),krf2));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq01,_mm256_sub_pd(_mm256_mul_pd(rinv01,rinvsq01),krf2));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx01);
 +            ty               = _mm256_mul_pd(fscal,dy01);
 +            tz               = _mm256_mul_pd(fscal,dz01);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq02,_mm256_sub_pd(_mm256_mul_pd(rinv02,rinvsq02),krf2));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx02);
 +            ty               = _mm256_mul_pd(fscal,dy02);
 +            tz               = _mm256_mul_pd(fscal,dz02);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_mul_pd(rinv10,rinvsq10),krf2));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_mul_pd(rinv11,rinvsq11),krf2));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_mul_pd(rinv12,rinvsq12),krf2));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_mul_pd(rinv20,rinvsq20),krf2));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_mul_pd(rinv21,rinvsq21),krf2));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_mul_pd(rinv22,rinvsq22),krf2));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
 +
 +            /* Inner loop uses 243 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                              &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
 +
 +            /* Calculate displacement vector */
 +            dx00             = _mm256_sub_pd(ix0,jx0);
 +            dy00             = _mm256_sub_pd(iy0,jy0);
 +            dz00             = _mm256_sub_pd(iz0,jz0);
 +            dx01             = _mm256_sub_pd(ix0,jx1);
 +            dy01             = _mm256_sub_pd(iy0,jy1);
 +            dz01             = _mm256_sub_pd(iz0,jz1);
 +            dx02             = _mm256_sub_pd(ix0,jx2);
 +            dy02             = _mm256_sub_pd(iy0,jy2);
 +            dz02             = _mm256_sub_pd(iz0,jz2);
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
 +            rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
 +            rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +
 +            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
 +            rinv01           = gmx_mm256_invsqrt_pd(rsq01);
 +            rinv02           = gmx_mm256_invsqrt_pd(rsq02);
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +
 +            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
 +            rinvsq01         = _mm256_mul_pd(rinv01,rinv01);
 +            rinvsq02         = _mm256_mul_pd(rinv02,rinv02);
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_mul_pd(rinv00,rinvsq00),krf2));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx00);
 +            ty               = _mm256_mul_pd(fscal,dy00);
 +            tz               = _mm256_mul_pd(fscal,dz00);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq01,_mm256_sub_pd(_mm256_mul_pd(rinv01,rinvsq01),krf2));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx01);
 +            ty               = _mm256_mul_pd(fscal,dy01);
 +            tz               = _mm256_mul_pd(fscal,dz01);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq02,_mm256_sub_pd(_mm256_mul_pd(rinv02,rinvsq02),krf2));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx02);
 +            ty               = _mm256_mul_pd(fscal,dy02);
 +            tz               = _mm256_mul_pd(fscal,dz02);
 +
 +            /* Update vectorial force */
 +            fix0             = _mm256_add_pd(fix0,tx);
 +            fiy0             = _mm256_add_pd(fiy0,ty);
 +            fiz0             = _mm256_add_pd(fiz0,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_mul_pd(rinv10,rinvsq10),krf2));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_mul_pd(rinv11,rinvsq11),krf2));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_mul_pd(rinv12,rinvsq12),krf2));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_mul_pd(rinv20,rinvsq20),krf2));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_mul_pd(rinv21,rinvsq21),krf2));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_mul_pd(rinv22,rinvsq22),krf2));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
 +
 +            /* Inner loop uses 243 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 18 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W3W3_F,outeriter*18 + inneriter*243);
 +}
index 244b59311e01113ed4c0319f26dda920fbaa9163,0000000000000000000000000000000000000000..424e22d8b5fb440e3e18dd9a9c3bff651e99a265
mode 100644,000000..100644
--- /dev/null
@@@ -1,935 -1,0 +1,935 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/*
 + * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2001-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the Free
 + * Software Foundation; either version 2 of the License, or (at your option) any
 + * later version.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website.
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecRF_VdwNone_GeomW4P1_VF_avx_256_double
 + * Electrostatics interaction: ReactionField
 + * VdW interaction:            None
 + * Geometry:                   Water4-Particle
 + * Calculate force/pot:        PotentialAndForce
 + */
 +void
 +nb_kernel_ElecRF_VdwNone_GeomW4P1_VF_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    real *           vdwioffsetptr3;
 +    __m256d          ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          dx30,dy30,dz30,rsq30,rinv30,rinvsq30,r30,qq30,c6_30,c12_30;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    krf              = _mm256_set1_pd(fr->ic->k_rf);
 +    krf2             = _mm256_set1_pd(fr->ic->k_rf*2.0);
 +    crf              = _mm256_set1_pd(fr->ic->c_rf);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    /* Charges are read for atoms 1..3 only — atom 0 of the 4-site water has no
 +     * electrostatic interactions in this ElecRF/VdwNone kernel (presumably the
 +     * vdw-only site; confirm against the kernel generator). */
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    iq3              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+3]));
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        /* NOTE(review): the load starts at i_coord_offset+DIM, i.e. atom 1,
 +         * matching the charge setup above that skips atom 0. */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset+DIM,
 +                                                    &ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
 +
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +        fix3             = _mm256_setzero_pd();
 +        fiy3             = _mm256_setzero_pd();
 +        fiz3             = _mm256_setzero_pd();
 +
 +        /* Reset potential sums */
 +        velecsum         = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        /* Main loop handles full groups of 4 real j-atoms; a negative jjnr
 +         * entry marks list padding, so it stops at the first group containing
 +         * a dummy index (handled by the masked epilogue below). */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx30             = _mm256_sub_pd(ix3,jx0);
 +            dy30             = _mm256_sub_pd(iy3,jy0);
 +            dz30             = _mm256_sub_pd(iz3,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq30            = gmx_mm256_calc_rsq_pd(dx30,dy30,dz30);
 +
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv30           = gmx_mm256_invsqrt_pd(rsq30);
 +
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq30         = _mm256_mul_pd(rinv30,rinv30);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_add_pd(rinv10,_mm256_mul_pd(krf,rsq10)),crf));
 +            felec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_mul_pd(rinv10,rinvsq10),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_add_pd(rinv20,_mm256_mul_pd(krf,rsq20)),crf));
 +            felec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_mul_pd(rinv20,rinvsq20),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq30             = _mm256_mul_pd(iq3,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq30,_mm256_sub_pd(_mm256_add_pd(rinv30,_mm256_mul_pd(krf,rsq30)),crf));
 +            felec            = _mm256_mul_pd(qq30,_mm256_sub_pd(_mm256_mul_pd(rinv30,rinvsq30),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx30);
 +            ty               = _mm256_mul_pd(fscal,dy30);
 +            tz               = _mm256_mul_pd(fscal,dz30);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 99 flops */
 +        }
 +
 +        /* Epilogue: handle the final, partially filled group of j-atoms.
 +         * Dummy entries (negative jjnr) are masked out of energies and forces. */
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +            /* (merge resolution above: the integer-compare mask is reinterpreted
 +             * as __m128 single precision, matching tmpmask0's declared type) */
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            /* Clamp dummy indices to 0 so loads below stay in bounds;
 +             * their contributions are removed via dummy_mask/scratch. */
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx30             = _mm256_sub_pd(ix3,jx0);
 +            dy30             = _mm256_sub_pd(iy3,jy0);
 +            dz30             = _mm256_sub_pd(iz3,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq30            = gmx_mm256_calc_rsq_pd(dx30,dy30,dz30);
 +
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv30           = gmx_mm256_invsqrt_pd(rsq30);
 +
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq30         = _mm256_mul_pd(rinv30,rinv30);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_add_pd(rinv10,_mm256_mul_pd(krf,rsq10)),crf));
 +            felec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_mul_pd(rinv10,rinvsq10),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_add_pd(rinv20,_mm256_mul_pd(krf,rsq20)),crf));
 +            felec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_mul_pd(rinv20,rinvsq20),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq30             = _mm256_mul_pd(iq3,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq30,_mm256_sub_pd(_mm256_add_pd(rinv30,_mm256_mul_pd(krf,rsq30)),crf));
 +            felec            = _mm256_mul_pd(qq30,_mm256_sub_pd(_mm256_mul_pd(rinv30,rinvsq30),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx30);
 +            ty               = _mm256_mul_pd(fscal,dy30);
 +            tz               = _mm256_mul_pd(fscal,dz30);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /* Forces for dummy entries are redirected to the scratch buffer so
 +             * real atom forces in f[] are untouched. */
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 99 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
 +                                                 f+i_coord_offset+DIM,fshift+i_shift_offset);
 +
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 19 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W4_VF,outeriter*19 + inneriter*99);
 +}
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecRF_VdwNone_GeomW4P1_F_avx_256_double
 + * Electrostatics interaction: ReactionField
 + * VdW interaction:            None
 + * Geometry:                   Water4-Particle
 + * Calculate force/pot:        Force
 + */
 +void
 +nb_kernel_ElecRF_VdwNone_GeomW4P1_F_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    real *           vdwioffsetptr3;
 +    __m256d          ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
 +    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
 +    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
 +    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
 +    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
 +    __m256d          dx30,dy30,dz30,rsq30,rinv30,rinvsq30,r30,qq30,c6_30,c12_30;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    krf              = _mm256_set1_pd(fr->ic->k_rf);
 +    krf2             = _mm256_set1_pd(fr->ic->k_rf*2.0);
 +    crf              = _mm256_set1_pd(fr->ic->c_rf);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    iq3              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+3]));
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset+DIM,
 +                                                    &ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
 +
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +        fix3             = _mm256_setzero_pd();
 +        fiy3             = _mm256_setzero_pd();
 +        fiz3             = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx30             = _mm256_sub_pd(ix3,jx0);
 +            dy30             = _mm256_sub_pd(iy3,jy0);
 +            dz30             = _mm256_sub_pd(iz3,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq30            = gmx_mm256_calc_rsq_pd(dx30,dy30,dz30);
 +
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv30           = gmx_mm256_invsqrt_pd(rsq30);
 +
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq30         = _mm256_mul_pd(rinv30,rinv30);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_mul_pd(rinv10,rinvsq10),krf2));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_mul_pd(rinv20,rinvsq20),krf2));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq30             = _mm256_mul_pd(iq3,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq30,_mm256_sub_pd(_mm256_mul_pd(rinv30,rinvsq30),krf2));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx30);
 +            ty               = _mm256_mul_pd(fscal,dy30);
 +            tz               = _mm256_mul_pd(fscal,dz30);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 84 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +
 +            /* Calculate displacement vector */
 +            dx10             = _mm256_sub_pd(ix1,jx0);
 +            dy10             = _mm256_sub_pd(iy1,jy0);
 +            dz10             = _mm256_sub_pd(iz1,jz0);
 +            dx20             = _mm256_sub_pd(ix2,jx0);
 +            dy20             = _mm256_sub_pd(iy2,jy0);
 +            dz20             = _mm256_sub_pd(iz2,jz0);
 +            dx30             = _mm256_sub_pd(ix3,jx0);
 +            dy30             = _mm256_sub_pd(iy3,jy0);
 +            dz30             = _mm256_sub_pd(iz3,jz0);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
 +            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
 +            rsq30            = gmx_mm256_calc_rsq_pd(dx30,dy30,dz30);
 +
 +            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
 +            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
 +            rinv30           = gmx_mm256_invsqrt_pd(rsq30);
 +
 +            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
 +            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
 +            rinvsq30         = _mm256_mul_pd(rinv30,rinv30);
 +
 +            /* Load parameters for j particles */
 +            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
 +                                                                 charge+jnrC+0,charge+jnrD+0);
 +
 +            fjx0             = _mm256_setzero_pd();
 +            fjy0             = _mm256_setzero_pd();
 +            fjz0             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq10             = _mm256_mul_pd(iq1,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_mul_pd(rinv10,rinvsq10),krf2));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx10);
 +            ty               = _mm256_mul_pd(fscal,dy10);
 +            tz               = _mm256_mul_pd(fscal,dz10);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq20             = _mm256_mul_pd(iq2,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_mul_pd(rinv20,rinvsq20),krf2));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx20);
 +            ty               = _mm256_mul_pd(fscal,dy20);
 +            tz               = _mm256_mul_pd(fscal,dz20);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* Compute parameters for interactions between i and j atoms */
 +            qq30             = _mm256_mul_pd(iq3,jq0);
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq30,_mm256_sub_pd(_mm256_mul_pd(rinv30,rinvsq30),krf2));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx30);
 +            ty               = _mm256_mul_pd(fscal,dy30);
 +            tz               = _mm256_mul_pd(fscal,dz30);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx0             = _mm256_add_pd(fjx0,tx);
 +            fjy0             = _mm256_add_pd(fjy0,ty);
 +            fjz0             = _mm256_add_pd(fjz0,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +
 +            /* Inner loop uses 84 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
 +                                                 f+i_coord_offset+DIM,fshift+i_shift_offset);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 18 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W4_F,outeriter*18 + inneriter*84);
 +}
index 77221d582a3279e07156aa2fd034f28527d10077,0000000000000000000000000000000000000000..6c7b730d64351f457006ff3bfab7be018112f8f9
mode 100644,000000..100644
--- /dev/null
@@@ -1,1731 -1,0 +1,1731 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/*
 + * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2001-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the Free
 + * Software Foundation; either version 2 of the License, or (at your option) any
 + * later version.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website.
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecRF_VdwNone_GeomW4W4_VF_avx_256_double
 + * Electrostatics interaction: ReactionField
 + * VdW interaction:            None
 + * Geometry:                   Water4-Water4
 + * Calculate force/pot:        PotentialAndForce
 + */
 +void
 +nb_kernel_ElecRF_VdwNone_GeomW4W4_VF_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    real *           vdwioffsetptr3;
 +    __m256d          ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
 +    int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
 +    __m256d          jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
 +    int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
 +    __m256d          jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
 +    int              vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D;
 +    __m256d          jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
 +    __m256d          dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
 +    __m256d          dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
 +    __m256d          dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
 +    __m256d          dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
 +    __m256d          dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
 +    __m256d          dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
 +    __m256d          dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
 +    __m256d          dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
 +    __m256d          dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    krf              = _mm256_set1_pd(fr->ic->k_rf);
 +    krf2             = _mm256_set1_pd(fr->ic->k_rf*2.0);
 +    crf              = _mm256_set1_pd(fr->ic->c_rf);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    iq3              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+3]));
 +
 +    jq1              = _mm256_set1_pd(charge[inr+1]);
 +    jq2              = _mm256_set1_pd(charge[inr+2]);
 +    jq3              = _mm256_set1_pd(charge[inr+3]);
 +    qq11             = _mm256_mul_pd(iq1,jq1);
 +    qq12             = _mm256_mul_pd(iq1,jq2);
 +    qq13             = _mm256_mul_pd(iq1,jq3);
 +    qq21             = _mm256_mul_pd(iq2,jq1);
 +    qq22             = _mm256_mul_pd(iq2,jq2);
 +    qq23             = _mm256_mul_pd(iq2,jq3);
 +    qq31             = _mm256_mul_pd(iq3,jq1);
 +    qq32             = _mm256_mul_pd(iq3,jq2);
 +    qq33             = _mm256_mul_pd(iq3,jq3);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset+DIM,
 +                                                    &ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
 +
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +        fix3             = _mm256_setzero_pd();
 +        fiy3             = _mm256_setzero_pd();
 +        fiz3             = _mm256_setzero_pd();
 +
 +        /* Reset potential sums */
 +        velecsum         = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA+DIM,x+j_coord_offsetB+DIM,
 +                                                 x+j_coord_offsetC+DIM,x+j_coord_offsetD+DIM,
 +                                                 &jx1,&jy1,&jz1,&jx2,&jy2,&jz2,&jx3,&jy3,&jz3);
 +
 +            /* Calculate displacement vector */
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx13             = _mm256_sub_pd(ix1,jx3);
 +            dy13             = _mm256_sub_pd(iy1,jy3);
 +            dz13             = _mm256_sub_pd(iz1,jz3);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +            dx23             = _mm256_sub_pd(ix2,jx3);
 +            dy23             = _mm256_sub_pd(iy2,jy3);
 +            dz23             = _mm256_sub_pd(iz2,jz3);
 +            dx31             = _mm256_sub_pd(ix3,jx1);
 +            dy31             = _mm256_sub_pd(iy3,jy1);
 +            dz31             = _mm256_sub_pd(iz3,jz1);
 +            dx32             = _mm256_sub_pd(ix3,jx2);
 +            dy32             = _mm256_sub_pd(iy3,jy2);
 +            dz32             = _mm256_sub_pd(iz3,jz2);
 +            dx33             = _mm256_sub_pd(ix3,jx3);
 +            dy33             = _mm256_sub_pd(iy3,jy3);
 +            dz33             = _mm256_sub_pd(iz3,jz3);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq13            = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +            rsq23            = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
 +            rsq31            = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
 +            rsq32            = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
 +            rsq33            = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);
 +
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv13           = gmx_mm256_invsqrt_pd(rsq13);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +            rinv23           = gmx_mm256_invsqrt_pd(rsq23);
 +            rinv31           = gmx_mm256_invsqrt_pd(rsq31);
 +            rinv32           = gmx_mm256_invsqrt_pd(rsq32);
 +            rinv33           = gmx_mm256_invsqrt_pd(rsq33);
 +
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq13         = _mm256_mul_pd(rinv13,rinv13);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +            rinvsq23         = _mm256_mul_pd(rinv23,rinv23);
 +            rinvsq31         = _mm256_mul_pd(rinv31,rinv31);
 +            rinvsq32         = _mm256_mul_pd(rinv32,rinv32);
 +            rinvsq33         = _mm256_mul_pd(rinv33,rinv33);
 +
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +            fjx3             = _mm256_setzero_pd();
 +            fjy3             = _mm256_setzero_pd();
 +            fjz3             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_add_pd(rinv11,_mm256_mul_pd(krf,rsq11)),crf));
 +            felec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_mul_pd(rinv11,rinvsq11),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_add_pd(rinv12,_mm256_mul_pd(krf,rsq12)),crf));
 +            felec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_mul_pd(rinv12,rinvsq12),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq13,_mm256_sub_pd(_mm256_add_pd(rinv13,_mm256_mul_pd(krf,rsq13)),crf));
 +            felec            = _mm256_mul_pd(qq13,_mm256_sub_pd(_mm256_mul_pd(rinv13,rinvsq13),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx13);
 +            ty               = _mm256_mul_pd(fscal,dy13);
 +            tz               = _mm256_mul_pd(fscal,dz13);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_add_pd(rinv21,_mm256_mul_pd(krf,rsq21)),crf));
 +            felec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_mul_pd(rinv21,rinvsq21),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_add_pd(rinv22,_mm256_mul_pd(krf,rsq22)),crf));
 +            felec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_mul_pd(rinv22,rinvsq22),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq23,_mm256_sub_pd(_mm256_add_pd(rinv23,_mm256_mul_pd(krf,rsq23)),crf));
 +            felec            = _mm256_mul_pd(qq23,_mm256_sub_pd(_mm256_mul_pd(rinv23,rinvsq23),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx23);
 +            ty               = _mm256_mul_pd(fscal,dy23);
 +            tz               = _mm256_mul_pd(fscal,dz23);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq31,_mm256_sub_pd(_mm256_add_pd(rinv31,_mm256_mul_pd(krf,rsq31)),crf));
 +            felec            = _mm256_mul_pd(qq31,_mm256_sub_pd(_mm256_mul_pd(rinv31,rinvsq31),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx31);
 +            ty               = _mm256_mul_pd(fscal,dy31);
 +            tz               = _mm256_mul_pd(fscal,dz31);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq32,_mm256_sub_pd(_mm256_add_pd(rinv32,_mm256_mul_pd(krf,rsq32)),crf));
 +            felec            = _mm256_mul_pd(qq32,_mm256_sub_pd(_mm256_mul_pd(rinv32,rinvsq32),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx32);
 +            ty               = _mm256_mul_pd(fscal,dy32);
 +            tz               = _mm256_mul_pd(fscal,dz32);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq33,_mm256_sub_pd(_mm256_add_pd(rinv33,_mm256_mul_pd(krf,rsq33)),crf));
 +            felec            = _mm256_mul_pd(qq33,_mm256_sub_pd(_mm256_mul_pd(rinv33,rinvsq33),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx33);
 +            ty               = _mm256_mul_pd(fscal,dy33);
 +            tz               = _mm256_mul_pd(fscal,dz33);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA+DIM,fjptrB+DIM,fjptrC+DIM,fjptrD+DIM,
 +                                                      fjx1,fjy1,fjz1,fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
 +
 +            /* Inner loop uses 288 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA+DIM,x+j_coord_offsetB+DIM,
 +                                                 x+j_coord_offsetC+DIM,x+j_coord_offsetD+DIM,
 +                                                 &jx1,&jy1,&jz1,&jx2,&jy2,&jz2,&jx3,&jy3,&jz3);
 +
 +            /* Calculate displacement vector */
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx13             = _mm256_sub_pd(ix1,jx3);
 +            dy13             = _mm256_sub_pd(iy1,jy3);
 +            dz13             = _mm256_sub_pd(iz1,jz3);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +            dx23             = _mm256_sub_pd(ix2,jx3);
 +            dy23             = _mm256_sub_pd(iy2,jy3);
 +            dz23             = _mm256_sub_pd(iz2,jz3);
 +            dx31             = _mm256_sub_pd(ix3,jx1);
 +            dy31             = _mm256_sub_pd(iy3,jy1);
 +            dz31             = _mm256_sub_pd(iz3,jz1);
 +            dx32             = _mm256_sub_pd(ix3,jx2);
 +            dy32             = _mm256_sub_pd(iy3,jy2);
 +            dz32             = _mm256_sub_pd(iz3,jz2);
 +            dx33             = _mm256_sub_pd(ix3,jx3);
 +            dy33             = _mm256_sub_pd(iy3,jy3);
 +            dz33             = _mm256_sub_pd(iz3,jz3);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq13            = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +            rsq23            = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
 +            rsq31            = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
 +            rsq32            = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
 +            rsq33            = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);
 +
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv13           = gmx_mm256_invsqrt_pd(rsq13);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +            rinv23           = gmx_mm256_invsqrt_pd(rsq23);
 +            rinv31           = gmx_mm256_invsqrt_pd(rsq31);
 +            rinv32           = gmx_mm256_invsqrt_pd(rsq32);
 +            rinv33           = gmx_mm256_invsqrt_pd(rsq33);
 +
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq13         = _mm256_mul_pd(rinv13,rinv13);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +            rinvsq23         = _mm256_mul_pd(rinv23,rinv23);
 +            rinvsq31         = _mm256_mul_pd(rinv31,rinv31);
 +            rinvsq32         = _mm256_mul_pd(rinv32,rinv32);
 +            rinvsq33         = _mm256_mul_pd(rinv33,rinv33);
 +
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +            fjx3             = _mm256_setzero_pd();
 +            fjy3             = _mm256_setzero_pd();
 +            fjz3             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_add_pd(rinv11,_mm256_mul_pd(krf,rsq11)),crf));
 +            felec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_mul_pd(rinv11,rinvsq11),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_add_pd(rinv12,_mm256_mul_pd(krf,rsq12)),crf));
 +            felec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_mul_pd(rinv12,rinvsq12),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq13,_mm256_sub_pd(_mm256_add_pd(rinv13,_mm256_mul_pd(krf,rsq13)),crf));
 +            felec            = _mm256_mul_pd(qq13,_mm256_sub_pd(_mm256_mul_pd(rinv13,rinvsq13),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx13);
 +            ty               = _mm256_mul_pd(fscal,dy13);
 +            tz               = _mm256_mul_pd(fscal,dz13);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_add_pd(rinv21,_mm256_mul_pd(krf,rsq21)),crf));
 +            felec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_mul_pd(rinv21,rinvsq21),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_add_pd(rinv22,_mm256_mul_pd(krf,rsq22)),crf));
 +            felec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_mul_pd(rinv22,rinvsq22),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq23,_mm256_sub_pd(_mm256_add_pd(rinv23,_mm256_mul_pd(krf,rsq23)),crf));
 +            felec            = _mm256_mul_pd(qq23,_mm256_sub_pd(_mm256_mul_pd(rinv23,rinvsq23),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx23);
 +            ty               = _mm256_mul_pd(fscal,dy23);
 +            tz               = _mm256_mul_pd(fscal,dz23);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq31,_mm256_sub_pd(_mm256_add_pd(rinv31,_mm256_mul_pd(krf,rsq31)),crf));
 +            felec            = _mm256_mul_pd(qq31,_mm256_sub_pd(_mm256_mul_pd(rinv31,rinvsq31),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx31);
 +            ty               = _mm256_mul_pd(fscal,dy31);
 +            tz               = _mm256_mul_pd(fscal,dz31);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq32,_mm256_sub_pd(_mm256_add_pd(rinv32,_mm256_mul_pd(krf,rsq32)),crf));
 +            felec            = _mm256_mul_pd(qq32,_mm256_sub_pd(_mm256_mul_pd(rinv32,rinvsq32),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx32);
 +            ty               = _mm256_mul_pd(fscal,dy32);
 +            tz               = _mm256_mul_pd(fscal,dz32);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq33,_mm256_sub_pd(_mm256_add_pd(rinv33,_mm256_mul_pd(krf,rsq33)),crf));
 +            felec            = _mm256_mul_pd(qq33,_mm256_sub_pd(_mm256_mul_pd(rinv33,rinvsq33),krf2));
 +
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx33);
 +            ty               = _mm256_mul_pd(fscal,dy33);
 +            tz               = _mm256_mul_pd(fscal,dz33);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA+DIM,fjptrB+DIM,fjptrC+DIM,fjptrD+DIM,
 +                                                      fjx1,fjy1,fjz1,fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
 +
 +            /* Inner loop uses 288 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
 +                                                 f+i_coord_offset+DIM,fshift+i_shift_offset);
 +
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 19 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W4W4_VF,outeriter*19 + inneriter*288);
 +}
 +/*
 + * Gromacs nonbonded kernel:   nb_kernel_ElecRF_VdwNone_GeomW4W4_F_avx_256_double
 + * Electrostatics interaction: ReactionField
 + * VdW interaction:            None
 + * Geometry:                   Water4-Water4
 + * Calculate force/pot:        Force
 + */
 +void
 +nb_kernel_ElecRF_VdwNone_GeomW4W4_F_avx_256_double
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    real *           vdwioffsetptr1;
 +    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
 +    real *           vdwioffsetptr2;
 +    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
 +    real *           vdwioffsetptr3;
 +    __m256d          ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
 +    int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
 +    __m256d          jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
 +    int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
 +    __m256d          jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
 +    int              vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D;
 +    __m256d          jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
 +    __m256d          dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
 +    __m256d          dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
 +    __m256d          dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
 +    __m256d          dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
 +    __m256d          dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
 +    __m256d          dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
 +    __m256d          dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
 +    __m256d          dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
 +    __m256d          dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    krf              = _mm256_set1_pd(fr->ic->k_rf);
 +    krf2             = _mm256_set1_pd(fr->ic->k_rf*2.0);
 +    crf              = _mm256_set1_pd(fr->ic->c_rf);
 +
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
 +    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
 +    iq3              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+3]));
 +
 +    jq1              = _mm256_set1_pd(charge[inr+1]);
 +    jq2              = _mm256_set1_pd(charge[inr+2]);
 +    jq3              = _mm256_set1_pd(charge[inr+3]);
 +    qq11             = _mm256_mul_pd(iq1,jq1);
 +    qq12             = _mm256_mul_pd(iq1,jq2);
 +    qq13             = _mm256_mul_pd(iq1,jq3);
 +    qq21             = _mm256_mul_pd(iq2,jq1);
 +    qq22             = _mm256_mul_pd(iq2,jq2);
 +    qq23             = _mm256_mul_pd(iq2,jq3);
 +    qq31             = _mm256_mul_pd(iq3,jq1);
 +    qq32             = _mm256_mul_pd(iq3,jq2);
 +    qq33             = _mm256_mul_pd(iq3,jq3);
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset+DIM,
 +                                                    &ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
 +
 +        fix1             = _mm256_setzero_pd();
 +        fiy1             = _mm256_setzero_pd();
 +        fiz1             = _mm256_setzero_pd();
 +        fix2             = _mm256_setzero_pd();
 +        fiy2             = _mm256_setzero_pd();
 +        fiz2             = _mm256_setzero_pd();
 +        fix3             = _mm256_setzero_pd();
 +        fiy3             = _mm256_setzero_pd();
 +        fiz3             = _mm256_setzero_pd();
 +
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA+DIM,x+j_coord_offsetB+DIM,
 +                                                 x+j_coord_offsetC+DIM,x+j_coord_offsetD+DIM,
 +                                                 &jx1,&jy1,&jz1,&jx2,&jy2,&jz2,&jx3,&jy3,&jz3);
 +
 +            /* Calculate displacement vector */
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx13             = _mm256_sub_pd(ix1,jx3);
 +            dy13             = _mm256_sub_pd(iy1,jy3);
 +            dz13             = _mm256_sub_pd(iz1,jz3);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +            dx23             = _mm256_sub_pd(ix2,jx3);
 +            dy23             = _mm256_sub_pd(iy2,jy3);
 +            dz23             = _mm256_sub_pd(iz2,jz3);
 +            dx31             = _mm256_sub_pd(ix3,jx1);
 +            dy31             = _mm256_sub_pd(iy3,jy1);
 +            dz31             = _mm256_sub_pd(iz3,jz1);
 +            dx32             = _mm256_sub_pd(ix3,jx2);
 +            dy32             = _mm256_sub_pd(iy3,jy2);
 +            dz32             = _mm256_sub_pd(iz3,jz2);
 +            dx33             = _mm256_sub_pd(ix3,jx3);
 +            dy33             = _mm256_sub_pd(iy3,jy3);
 +            dz33             = _mm256_sub_pd(iz3,jz3);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq13            = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +            rsq23            = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
 +            rsq31            = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
 +            rsq32            = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
 +            rsq33            = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);
 +
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv13           = gmx_mm256_invsqrt_pd(rsq13);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +            rinv23           = gmx_mm256_invsqrt_pd(rsq23);
 +            rinv31           = gmx_mm256_invsqrt_pd(rsq31);
 +            rinv32           = gmx_mm256_invsqrt_pd(rsq32);
 +            rinv33           = gmx_mm256_invsqrt_pd(rsq33);
 +
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq13         = _mm256_mul_pd(rinv13,rinv13);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +            rinvsq23         = _mm256_mul_pd(rinv23,rinv23);
 +            rinvsq31         = _mm256_mul_pd(rinv31,rinv31);
 +            rinvsq32         = _mm256_mul_pd(rinv32,rinv32);
 +            rinvsq33         = _mm256_mul_pd(rinv33,rinv33);
 +
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +            fjx3             = _mm256_setzero_pd();
 +            fjy3             = _mm256_setzero_pd();
 +            fjz3             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_mul_pd(rinv11,rinvsq11),krf2));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_mul_pd(rinv12,rinvsq12),krf2));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq13,_mm256_sub_pd(_mm256_mul_pd(rinv13,rinvsq13),krf2));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx13);
 +            ty               = _mm256_mul_pd(fscal,dy13);
 +            tz               = _mm256_mul_pd(fscal,dz13);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_mul_pd(rinv21,rinvsq21),krf2));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_mul_pd(rinv22,rinvsq22),krf2));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq23,_mm256_sub_pd(_mm256_mul_pd(rinv23,rinvsq23),krf2));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx23);
 +            ty               = _mm256_mul_pd(fscal,dy23);
 +            tz               = _mm256_mul_pd(fscal,dz23);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq31,_mm256_sub_pd(_mm256_mul_pd(rinv31,rinvsq31),krf2));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx31);
 +            ty               = _mm256_mul_pd(fscal,dy31);
 +            tz               = _mm256_mul_pd(fscal,dz31);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq32,_mm256_sub_pd(_mm256_mul_pd(rinv32,rinvsq32),krf2));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx32);
 +            ty               = _mm256_mul_pd(fscal,dy32);
 +            tz               = _mm256_mul_pd(fscal,dz32);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq33,_mm256_sub_pd(_mm256_mul_pd(rinv33,rinvsq33),krf2));
 +
 +            fscal            = felec;
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx33);
 +            ty               = _mm256_mul_pd(fscal,dy33);
 +            tz               = _mm256_mul_pd(fscal,dz33);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA+DIM,fjptrB+DIM,fjptrC+DIM,fjptrD+DIM,
 +                                                      fjx1,fjy1,fjz1,fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
 +
 +            /* Inner loop uses 243 flops */
 +        }
 +
 +        if(jidx<j_index_end)
 +        {
 +
 +            /* Get j neighbor index, and coordinate index */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA+DIM,x+j_coord_offsetB+DIM,
 +                                                 x+j_coord_offsetC+DIM,x+j_coord_offsetD+DIM,
 +                                                 &jx1,&jy1,&jz1,&jx2,&jy2,&jz2,&jx3,&jy3,&jz3);
 +
 +            /* Calculate displacement vector */
 +            dx11             = _mm256_sub_pd(ix1,jx1);
 +            dy11             = _mm256_sub_pd(iy1,jy1);
 +            dz11             = _mm256_sub_pd(iz1,jz1);
 +            dx12             = _mm256_sub_pd(ix1,jx2);
 +            dy12             = _mm256_sub_pd(iy1,jy2);
 +            dz12             = _mm256_sub_pd(iz1,jz2);
 +            dx13             = _mm256_sub_pd(ix1,jx3);
 +            dy13             = _mm256_sub_pd(iy1,jy3);
 +            dz13             = _mm256_sub_pd(iz1,jz3);
 +            dx21             = _mm256_sub_pd(ix2,jx1);
 +            dy21             = _mm256_sub_pd(iy2,jy1);
 +            dz21             = _mm256_sub_pd(iz2,jz1);
 +            dx22             = _mm256_sub_pd(ix2,jx2);
 +            dy22             = _mm256_sub_pd(iy2,jy2);
 +            dz22             = _mm256_sub_pd(iz2,jz2);
 +            dx23             = _mm256_sub_pd(ix2,jx3);
 +            dy23             = _mm256_sub_pd(iy2,jy3);
 +            dz23             = _mm256_sub_pd(iz2,jz3);
 +            dx31             = _mm256_sub_pd(ix3,jx1);
 +            dy31             = _mm256_sub_pd(iy3,jy1);
 +            dz31             = _mm256_sub_pd(iz3,jz1);
 +            dx32             = _mm256_sub_pd(ix3,jx2);
 +            dy32             = _mm256_sub_pd(iy3,jy2);
 +            dz32             = _mm256_sub_pd(iz3,jz2);
 +            dx33             = _mm256_sub_pd(ix3,jx3);
 +            dy33             = _mm256_sub_pd(iy3,jy3);
 +            dz33             = _mm256_sub_pd(iz3,jz3);
 +
 +            /* Calculate squared distance and things based on it */
 +            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
 +            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
 +            rsq13            = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
 +            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
 +            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
 +            rsq23            = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
 +            rsq31            = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
 +            rsq32            = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
 +            rsq33            = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);
 +
 +            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
 +            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
 +            rinv13           = gmx_mm256_invsqrt_pd(rsq13);
 +            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
 +            rinv22           = gmx_mm256_invsqrt_pd(rsq22);
 +            rinv23           = gmx_mm256_invsqrt_pd(rsq23);
 +            rinv31           = gmx_mm256_invsqrt_pd(rsq31);
 +            rinv32           = gmx_mm256_invsqrt_pd(rsq32);
 +            rinv33           = gmx_mm256_invsqrt_pd(rsq33);
 +
 +            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
 +            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
 +            rinvsq13         = _mm256_mul_pd(rinv13,rinv13);
 +            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
 +            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
 +            rinvsq23         = _mm256_mul_pd(rinv23,rinv23);
 +            rinvsq31         = _mm256_mul_pd(rinv31,rinv31);
 +            rinvsq32         = _mm256_mul_pd(rinv32,rinv32);
 +            rinvsq33         = _mm256_mul_pd(rinv33,rinv33);
 +
 +            fjx1             = _mm256_setzero_pd();
 +            fjy1             = _mm256_setzero_pd();
 +            fjz1             = _mm256_setzero_pd();
 +            fjx2             = _mm256_setzero_pd();
 +            fjy2             = _mm256_setzero_pd();
 +            fjz2             = _mm256_setzero_pd();
 +            fjx3             = _mm256_setzero_pd();
 +            fjy3             = _mm256_setzero_pd();
 +            fjz3             = _mm256_setzero_pd();
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_mul_pd(rinv11,rinvsq11),krf2));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx11);
 +            ty               = _mm256_mul_pd(fscal,dy11);
 +            tz               = _mm256_mul_pd(fscal,dz11);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_mul_pd(rinv12,rinvsq12),krf2));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx12);
 +            ty               = _mm256_mul_pd(fscal,dy12);
 +            tz               = _mm256_mul_pd(fscal,dz12);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq13,_mm256_sub_pd(_mm256_mul_pd(rinv13,rinvsq13),krf2));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx13);
 +            ty               = _mm256_mul_pd(fscal,dy13);
 +            tz               = _mm256_mul_pd(fscal,dz13);
 +
 +            /* Update vectorial force */
 +            fix1             = _mm256_add_pd(fix1,tx);
 +            fiy1             = _mm256_add_pd(fiy1,ty);
 +            fiz1             = _mm256_add_pd(fiz1,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_mul_pd(rinv21,rinvsq21),krf2));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx21);
 +            ty               = _mm256_mul_pd(fscal,dy21);
 +            tz               = _mm256_mul_pd(fscal,dz21);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_mul_pd(rinv22,rinvsq22),krf2));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx22);
 +            ty               = _mm256_mul_pd(fscal,dy22);
 +            tz               = _mm256_mul_pd(fscal,dz22);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq23,_mm256_sub_pd(_mm256_mul_pd(rinv23,rinvsq23),krf2));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx23);
 +            ty               = _mm256_mul_pd(fscal,dy23);
 +            tz               = _mm256_mul_pd(fscal,dz23);
 +
 +            /* Update vectorial force */
 +            fix2             = _mm256_add_pd(fix2,tx);
 +            fiy2             = _mm256_add_pd(fiy2,ty);
 +            fiz2             = _mm256_add_pd(fiz2,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq31,_mm256_sub_pd(_mm256_mul_pd(rinv31,rinvsq31),krf2));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx31);
 +            ty               = _mm256_mul_pd(fscal,dy31);
 +            tz               = _mm256_mul_pd(fscal,dz31);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx1             = _mm256_add_pd(fjx1,tx);
 +            fjy1             = _mm256_add_pd(fjy1,ty);
 +            fjz1             = _mm256_add_pd(fjz1,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq32,_mm256_sub_pd(_mm256_mul_pd(rinv32,rinvsq32),krf2));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx32);
 +            ty               = _mm256_mul_pd(fscal,dy32);
 +            tz               = _mm256_mul_pd(fscal,dz32);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx2             = _mm256_add_pd(fjx2,tx);
 +            fjy2             = _mm256_add_pd(fjy2,ty);
 +            fjz2             = _mm256_add_pd(fjz2,tz);
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            felec            = _mm256_mul_pd(qq33,_mm256_sub_pd(_mm256_mul_pd(rinv33,rinvsq33),krf2));
 +
 +            fscal            = felec;
 +
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx33);
 +            ty               = _mm256_mul_pd(fscal,dy33);
 +            tz               = _mm256_mul_pd(fscal,dz33);
 +
 +            /* Update vectorial force */
 +            fix3             = _mm256_add_pd(fix3,tx);
 +            fiy3             = _mm256_add_pd(fiy3,ty);
 +            fiz3             = _mm256_add_pd(fiz3,tz);
 +
 +            fjx3             = _mm256_add_pd(fjx3,tx);
 +            fjy3             = _mm256_add_pd(fjy3,ty);
 +            fjz3             = _mm256_add_pd(fjz3,tz);
 +
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA+DIM,fjptrB+DIM,fjptrC+DIM,fjptrD+DIM,
 +                                                      fjx1,fjy1,fjz1,fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
 +
 +            /* Inner loop uses 243 flops */
 +        }
 +
 +        /* End of innermost loop */
 +
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
 +                                                 f+i_coord_offset+DIM,fshift+i_shift_offset);
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses 18 flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W4W4_F,outeriter*18 + inneriter*243);
 +}
index 4e8d50428620a9bae44359ffaeddeaedbedc3dfb,0000000000000000000000000000000000000000..9f36946353161cecea954211e171a9aae4b784f2
mode 100644,000000..100644
--- /dev/null
@@@ -1,1047 -1,0 +1,1047 @@@
-             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +/* #if 0 */
 +#error This file must be processed with the Gromacs pre-preprocessor
 +/* #endif */
 +/* #if INCLUDE_HEADER */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "../nb_kernel.h"
 +#include "types/simple.h"
 +#include "vec.h"
 +#include "nrnb.h"
 +
 +#include "gmx_math_x86_avx_256_double.h"
 +#include "kernelutil_x86_avx_256_double.h"
 +/* #endif */
 +
 +/* ## List of variables set by the generating script:                                    */
 +/* ##                                                                                    */
 +/* ## Setttings that apply to the entire kernel:                                         */
 +/* ## KERNEL_ELEC:           String, choice for electrostatic interactions               */
 +/* ## KERNEL_VDW:            String, choice for van der Waals interactions               */
 +/* ## KERNEL_NAME:           String, name of this kernel                                 */
 +/* ## KERNEL_VF:             String telling if we calculate potential, force, or both    */
 +/* ## GEOMETRY_I/GEOMETRY_J: String, name of each geometry, e.g. 'Water3' or '1Particle' */
 +/* ##                                                                                    */
 +/* ## Setttings that apply to particles in the outer (I) or inner (J) loops:             */
 +/* ## PARTICLES_I[]/         Arrays with lists of i/j particles to use in kernel. It is  */
 +/* ## PARTICLES_J[]:         just [0] for particle geometry, but can be longer for water */
 +/* ## PARTICLES_ELEC_I[]/    Arrays with lists of i/j particle that have electrostatics  */
 +/* ## PARTICLES_ELEC_J[]:    interactions that should be calculated in this kernel.      */
 +/* ## PARTICLES_VDW_I[]/     Arrays with the list of i/j particle that have VdW          */
 +/* ## PARTICLES_VDW_J[]:     interactions that should be calculated in this kernel.      */
 +/* ##                                                                                    */
 +/* ## Setttings for pairs of interactions (e.g. 2nd i particle against 1st j particle)   */
 +/* ## PAIRS_IJ[]:            Array with (i,j) tuples of pairs for which interactions     */
 +/* ##                        should be calculated in this kernel. Zero-charge particles  */
 +/* ##                        do not have interactions with particles without vdw, and    */
 +/* ##                        Vdw-only interactions are not evaluated in a no-vdw-kernel. */
 +/* ## INTERACTION_FLAGS[][]: 2D matrix, dimension e.g. 3*3 for water-water interactions. */
 +/* ##                        For each i-j pair, the element [I][J] is a list of strings  */
 +/* ##                        defining properties/flags of this interaction. Examples     */
 +/* ##                        include 'electrostatics'/'vdw' if that type of interaction  */
 +/* ##                        should be evaluated, 'rsq'/'rinv'/'rinvsq' if those values  */
 +/* ##                        are needed, and 'exactcutoff' or 'shift','switch' to        */
 +/* ##                        decide if the force/potential should be modified. This way  */
 +/* ##                        we only calculate values absolutely needed for each case.   */
 +
 +/* ## Calculate the size and offset for (merged/interleaved) table data */
 +
 +/*
 + * Gromacs nonbonded kernel:   {KERNEL_NAME}
 + * Electrostatics interaction: {KERNEL_ELEC}
 + * VdW interaction:            {KERNEL_VDW}
 + * Geometry:                   {GEOMETRY_I}-{GEOMETRY_J}
 + * Calculate force/pot:        {KERNEL_VF}
 + */
 +void
 +{KERNEL_NAME}
 +                    (t_nblist * gmx_restrict                nlist,
 +                     rvec * gmx_restrict                    xx,
 +                     rvec * gmx_restrict                    ff,
 +                     t_forcerec * gmx_restrict              fr,
 +                     t_mdatoms * gmx_restrict               mdatoms,
 +                     nb_kernel_data_t * gmx_restrict        kernel_data,
 +                     t_nrnb * gmx_restrict                  nrnb)
 +{
 +    /* ## Not all variables are used for all kernels, but any optimizing compiler fixes that, */
 +    /* ## so there is no point in going to extremes to exclude variables that are not needed. */
 +    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
 +     * just 0 for non-waters.
 +     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
 +     * jnr indices corresponding to data put in the four positions in the SIMD register.
 +     */
 +    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
 +    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
 +    int              jnrA,jnrB,jnrC,jnrD;
 +    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
 +    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
 +    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
 +    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
 +    real             rcutoff_scalar;
 +    real             *shiftvec,*fshift,*x,*f;
 +    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
 +    real             scratch[4*DIM];
 +    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
 +    /* #for I in PARTICLES_I */
 +    real *           vdwioffsetptr{I};
 +    __m256d          ix{I},iy{I},iz{I},fix{I},fiy{I},fiz{I},iq{I},isai{I};
 +    /* #endfor */
 +    /* #for J in PARTICLES_J */
 +    int              vdwjidx{J}A,vdwjidx{J}B,vdwjidx{J}C,vdwjidx{J}D;
 +    __m256d          jx{J},jy{J},jz{J},fjx{J},fjy{J},fjz{J},jq{J},isaj{J};
 +    /* #endfor */
 +    /* #for I,J in PAIRS_IJ */
 +    __m256d          dx{I}{J},dy{I}{J},dz{I}{J},rsq{I}{J},rinv{I}{J},rinvsq{I}{J},r{I}{J},qq{I}{J},c6_{I}{J},c12_{I}{J};
 +    /* #endfor */
 +    /* #if KERNEL_ELEC != 'None' */
 +    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
 +    real             *charge;
 +    /* #endif */
 +    /* #if 'GeneralizedBorn' in KERNEL_ELEC */
 +    __m128i          gbitab;
 +    __m256d          vgb,fgb,vgbsum,dvdasum,gbscale,gbtabscale,isaprod,gbqqfactor,gbinvepsdiff,gbeps,dvdatmp;
 +    __m256d          minushalf = _mm256_set1_pd(-0.5);
 +    real             *invsqrta,*dvda,*gbtab;
 +    /* #endif */
 +    /* #if KERNEL_VDW != 'None' */
 +    int              nvdwtype;
 +    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
 +    int              *vdwtype;
 +    real             *vdwparam;
 +    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
 +    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
 +    /* #endif */
 +    /* #if 'Table' in KERNEL_ELEC or 'GeneralizedBorn' in KERNEL_ELEC or 'Table' in KERNEL_VDW */
 +    __m128i          vfitab;
 +    __m128i          ifour       = _mm_set1_epi32(4);
 +    __m256d          rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
 +    real             *vftab;
 +    /* #endif */
 +    /* #if 'Ewald' in KERNEL_ELEC */
 +    __m128i          ewitab;
 +    __m256d          ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
 +    __m256d          beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
 +    real             *ewtab;
 +    /* #endif */
 +    /* #if 'PotentialSwitch' in [KERNEL_MOD_ELEC,KERNEL_MOD_VDW] */
 +    __m256d          rswitch,swV3,swV4,swV5,swF2,swF3,swF4,d,d2,sw,dsw;
 +    real             rswitch_scalar,d_scalar;
 +    /* #endif */
 +    __m256d          dummy_mask,cutoff_mask;
 +    __m128           tmpmask0,tmpmask1;
 +    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
 +    __m256d          one     = _mm256_set1_pd(1.0);
 +    __m256d          two     = _mm256_set1_pd(2.0);
 +    x                = xx[0];
 +    f                = ff[0];
 +
 +    nri              = nlist->nri;
 +    iinr             = nlist->iinr;
 +    jindex           = nlist->jindex;
 +    jjnr             = nlist->jjnr;
 +    shiftidx         = nlist->shift;
 +    gid              = nlist->gid;
 +    shiftvec         = fr->shift_vec[0];
 +    fshift           = fr->fshift[0];
 +    /* #if KERNEL_ELEC != 'None' */
 +    facel            = _mm256_set1_pd(fr->epsfac);
 +    charge           = mdatoms->chargeA;
 +    /*     #if 'ReactionField' in KERNEL_ELEC */
 +    krf              = _mm256_set1_pd(fr->ic->k_rf);
 +    krf2             = _mm256_set1_pd(fr->ic->k_rf*2.0);
 +    crf              = _mm256_set1_pd(fr->ic->c_rf);
 +    /*     #endif */
 +    /* #endif */
 +    /* #if KERNEL_VDW != 'None' */
 +    nvdwtype         = fr->ntype;
 +    vdwparam         = fr->nbfp;
 +    vdwtype          = mdatoms->typeA;
 +    /* #endif */
 +
 +    /* #if 'Table' in KERNEL_ELEC and 'Table' in KERNEL_VDW */
 +    vftab            = kernel_data->table_elec_vdw->data;
 +    vftabscale       = _mm256_set1_pd(kernel_data->table_elec_vdw->scale);
 +    /* #elif 'Table' in KERNEL_ELEC */
 +    vftab            = kernel_data->table_elec->data;
 +    vftabscale       = _mm256_set1_pd(kernel_data->table_elec->scale);
 +    /* #elif 'Table' in KERNEL_VDW */
 +    vftab            = kernel_data->table_vdw->data;
 +    vftabscale       = _mm256_set1_pd(kernel_data->table_vdw->scale);
 +    /* #endif */
 +
 +    /* #if 'Ewald' in KERNEL_ELEC */
 +    sh_ewald         = _mm256_set1_pd(fr->ic->sh_ewald);
 +    beta             = _mm256_set1_pd(fr->ic->ewaldcoeff);
 +    beta2            = _mm256_mul_pd(beta,beta);
 +    beta3            = _mm256_mul_pd(beta,beta2);
 +
 +    /*     #if KERNEL_VF=='Force' and KERNEL_MOD_ELEC!='PotentialSwitch' */
 +    ewtab            = fr->ic->tabq_coul_F;
 +    ewtabscale       = _mm256_set1_pd(fr->ic->tabq_scale);
 +    ewtabhalfspace   = _mm256_set1_pd(0.5/fr->ic->tabq_scale);
 +    /*     #else */
 +    ewtab            = fr->ic->tabq_coul_FDV0;
 +    ewtabscale       = _mm256_set1_pd(fr->ic->tabq_scale);
 +    ewtabhalfspace   = _mm256_set1_pd(0.5/fr->ic->tabq_scale);
 +     /*     #endif */
 +    /* #endif */
 +
 +    /* #if KERNEL_ELEC=='GeneralizedBorn' */
 +    invsqrta         = fr->invsqrta;
 +    dvda             = fr->dvda;
 +    gbtabscale       = _mm256_set1_pd(fr->gbtab.scale);
 +    gbtab            = fr->gbtab.data;
 +    gbinvepsdiff     = _mm256_set1_pd((1.0/fr->epsilon_r) - (1.0/fr->gb_epsilon_solvent));
 +    /* #endif */
 +
 +    /* #if 'Water' in GEOMETRY_I */
 +    /* Setup water-specific parameters */
 +    inr              = nlist->iinr[0];
 +    /*     #for I in PARTICLES_ELEC_I */
 +    iq{I}              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+{I}]));
 +    /*     #endfor */
 +    /*     #for I in PARTICLES_VDW_I */
 +    vdwioffsetptr{I}   = vdwparam+2*nvdwtype*vdwtype[inr+{I}];
 +    /*     #endfor */
 +    /* #endif */
 +
 +    /* #if 'Water' in GEOMETRY_J */
 +    /*     #for J in PARTICLES_ELEC_J */
 +    jq{J}              = _mm256_set1_pd(charge[inr+{J}]);
 +    /*     #endfor */
 +    /*     #for J in PARTICLES_VDW_J */
 +    vdwjidx{J}A        = 2*vdwtype[inr+{J}];
 +    /*     #endfor */
 +    /*     #for I,J in PAIRS_IJ */
 +    /*         #if 'electrostatics' in INTERACTION_FLAGS[I][J] */
 +    qq{I}{J}             = _mm256_mul_pd(iq{I},jq{J});
 +    /*         #endif */
 +    /*         #if 'vdw' in INTERACTION_FLAGS[I][J] */
 +    c6_{I}{J}            = _mm256_set1_pd(vdwioffsetptr{I}[vdwjidx{J}A]);
 +    c12_{I}{J}           = _mm256_set1_pd(vdwioffsetptr{I}[vdwjidx{J}A+1]);
 +    /*         #endif */
 +    /*     #endfor */
 +    /* #endif */
 +
 +    /* #if KERNEL_MOD_ELEC!='None' or KERNEL_MOD_VDW!='None' */
 +    /*     #if KERNEL_ELEC!='None' */
 +    /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
 +    rcutoff_scalar   = fr->rcoulomb;
 +    /*     #else */
 +    rcutoff_scalar   = fr->rvdw;
 +    /*     #endif */
 +    rcutoff          = _mm256_set1_pd(rcutoff_scalar);
 +    rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);
 +    /* #endif */
 +
 +    /* #if KERNEL_MOD_VDW=='PotentialShift' */
 +    sh_vdw_invrcut6  = _mm256_set1_pd(fr->ic->sh_invrc6);
 +    rvdw             = _mm256_set1_pd(fr->rvdw);
 +    /* #endif */
 +
 +    /* #if 'PotentialSwitch' in [KERNEL_MOD_ELEC,KERNEL_MOD_VDW] */
 +    /*     #if KERNEL_MOD_ELEC=='PotentialSwitch'  */
 +    rswitch_scalar   = fr->rcoulomb_switch;
 +    rswitch          = _mm256_set1_pd(rswitch_scalar);
 +    /*     #else */
 +    rswitch_scalar   = fr->rvdw_switch;
 +    rswitch          = _mm256_set1_pd(rswitch_scalar);
 +    /*     #endif */
 +    /* Setup switch parameters */
 +    d_scalar         = rcutoff_scalar-rswitch_scalar;
 +    d                = _mm256_set1_pd(d_scalar);
 +    swV3             = _mm256_set1_pd(-10.0/(d_scalar*d_scalar*d_scalar));
 +    swV4             = _mm256_set1_pd( 15.0/(d_scalar*d_scalar*d_scalar*d_scalar));
 +    swV5             = _mm256_set1_pd( -6.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
 +    /*     #if 'Force' in KERNEL_VF */
 +    swF2             = _mm256_set1_pd(-30.0/(d_scalar*d_scalar*d_scalar));
 +    swF3             = _mm256_set1_pd( 60.0/(d_scalar*d_scalar*d_scalar*d_scalar));
 +    swF4             = _mm256_set1_pd(-30.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
 +    /*     #endif */
 +    /* #endif */
 +
 +    /* Avoid stupid compiler warnings */
 +    jnrA = jnrB = jnrC = jnrD = 0;
 +    j_coord_offsetA = 0;
 +    j_coord_offsetB = 0;
 +    j_coord_offsetC = 0;
 +    j_coord_offsetD = 0;
 +
 +    /* ## Keep track of the floating point operations we issue for reporting! */
 +    /* #define OUTERFLOPS 0 */
 +    outeriter        = 0;
 +    inneriter        = 0;
 +
 +    for(iidx=0;iidx<4*DIM;iidx++)
 +    {
 +        scratch[iidx] = 0.0;
 +    }
 +
 +    /* Start outer loop over neighborlists */
 +    for(iidx=0; iidx<nri; iidx++)
 +    {
 +        /* Load shift vector for this list */
 +        i_shift_offset   = DIM*shiftidx[iidx];
 +
 +        /* Load limits for loop over neighbors */
 +        j_index_start    = jindex[iidx];
 +        j_index_end      = jindex[iidx+1];
 +
 +        /* Get outer coordinate index */
 +        inr              = iinr[iidx];
 +        i_coord_offset   = DIM*inr;
 +
 +        /* Load i particle coords and add shift vector */
 +        /* #if GEOMETRY_I == 'Particle' */
 +        gmx_mm256_load_shift_and_1rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
 +        /* #elif GEOMETRY_I == 'Water3' */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
 +        /* #elif GEOMETRY_I == 'Water4' */
 +        /*     #if 0 in PARTICLES_I                 */
 +        gmx_mm256_load_shift_and_4rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
 +                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
 +        /*     #else                                */
 +        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset+DIM,
 +                                                    &ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
 +        /*     #endif                               */
 +        /* #endif                                   */
 +
 +        /* #if 'Force' in KERNEL_VF */
 +        /*     #for I in PARTICLES_I */
 +        fix{I}             = _mm256_setzero_pd();
 +        fiy{I}             = _mm256_setzero_pd();
 +        fiz{I}             = _mm256_setzero_pd();
 +        /*     #endfor */
 +        /* #endif */
 +
 +        /* ## For water we already preloaded parameters at the start of the kernel */
 +        /* #if not 'Water' in GEOMETRY_I */
 +        /* Load parameters for i particles */
 +        /*     #for I in PARTICLES_ELEC_I */
 +        iq{I}              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+{I}]));
 +        /*         #define OUTERFLOPS OUTERFLOPS+1 */
 +        /*         #if KERNEL_ELEC=='GeneralizedBorn' */
 +        isai{I}            = _mm256_set1_pd(invsqrta[inr+{I}]);
 +        /*         #endif */
 +        /*     #endfor */
 +        /*     #for I in PARTICLES_VDW_I */
 +        vdwioffsetptr{I}   = vdwparam+2*nvdwtype*vdwtype[inr+{I}];
 +        /*     #endfor */
 +        /* #endif */
 +
 +        /* #if 'Potential' in KERNEL_VF */
 +        /* Reset potential sums */
 +        /*     #if KERNEL_ELEC != 'None' */
 +        velecsum         = _mm256_setzero_pd();
 +        /*     #endif */
 +        /*     #if 'GeneralizedBorn' in KERNEL_ELEC */
 +        vgbsum           = _mm256_setzero_pd();
 +        /*     #endif */
 +        /*     #if KERNEL_VDW != 'None' */
 +        vvdwsum          = _mm256_setzero_pd();
 +        /*     #endif */
 +        /* #endif */
 +        /*     #if 'GeneralizedBorn' in KERNEL_ELEC and 'Force' in KERNEL_VF */
 +        dvdasum          = _mm256_setzero_pd();
 +        /*     #endif */
 +
 +        /* #for ROUND in ['Loop','Epilogue'] */
 +
 +        /* #if ROUND =='Loop' */
 +        /* Start inner kernel loop */
 +        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
 +        {
 +        /* ## First round is normal loop (next statement resets indentation) */
 +        /*     #if 0 */
 +        }
 +        /*     #endif */
 +        /* #else */
 +        if(jidx<j_index_end)
 +        {
 +        /* ## Second round is epilogue */
 +        /* #endif */
 +        /* #define INNERFLOPS 0 */
 +
 +            /* Get j neighbor index, and coordinate index */
 +            /* #if ROUND =='Loop' */
 +            jnrA             = jjnr[jidx];
 +            jnrB             = jjnr[jidx+1];
 +            jnrC             = jjnr[jidx+2];
 +            jnrD             = jjnr[jidx+3];
 +            /* #else */
 +            jnrlistA         = jjnr[jidx];
 +            jnrlistB         = jjnr[jidx+1];
 +            jnrlistC         = jjnr[jidx+2];
 +            jnrlistD         = jjnr[jidx+3];
 +            /* Sign of each element will be negative for non-real atoms.
 +             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 +             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 +             */
++            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
 +
 +            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
 +            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
 +            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
 +
 +            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
 +            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
 +            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
 +            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
 +            /* #endif */
 +            j_coord_offsetA  = DIM*jnrA;
 +            j_coord_offsetB  = DIM*jnrB;
 +            j_coord_offsetC  = DIM*jnrC;
 +            j_coord_offsetD  = DIM*jnrD;
 +
 +            /* load j atom coordinates */
 +            /* #if GEOMETRY_J == 'Particle'             */
 +            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0);
 +            /* #elif GEOMETRY_J == 'Water3'             */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                              &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
 +            /* #elif GEOMETRY_J == 'Water4'             */
 +            /*     #if 0 in PARTICLES_J                 */
 +            gmx_mm256_load_4rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
 +                                                 x+j_coord_offsetC,x+j_coord_offsetD,
 +                                                 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
 +                                                 &jy2,&jz2,&jx3,&jy3,&jz3);
 +            /*     #else                                */
 +            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA+DIM,x+j_coord_offsetB+DIM,
 +                                                 x+j_coord_offsetC+DIM,x+j_coord_offsetD+DIM,
 +                                                 &jx1,&jy1,&jz1,&jx2,&jy2,&jz2,&jx3,&jy3,&jz3);
 +            /*     #endif                               */
 +            /* #endif                                   */
 +
 +            /* Calculate displacement vector */
 +            /* #for I,J in PAIRS_IJ */
 +            dx{I}{J}             = _mm256_sub_pd(ix{I},jx{J});
 +            dy{I}{J}             = _mm256_sub_pd(iy{I},jy{J});
 +            dz{I}{J}             = _mm256_sub_pd(iz{I},jz{J});
 +            /*     #define INNERFLOPS INNERFLOPS+3 */
 +            /* #endfor */
 +
 +            /* Calculate squared distance and things based on it */
 +            /* #for I,J in PAIRS_IJ */
 +            rsq{I}{J}            = gmx_mm256_calc_rsq_pd(dx{I}{J},dy{I}{J},dz{I}{J});
 +            /*     #define INNERFLOPS INNERFLOPS+5 */
 +            /* #endfor */
 +
 +            /* #for I,J in PAIRS_IJ */
 +            /*     #if 'rinv' in INTERACTION_FLAGS[I][J] */
 +            rinv{I}{J}           = gmx_mm256_invsqrt_pd(rsq{I}{J});
 +            /*         #define INNERFLOPS INNERFLOPS+5 */
 +            /*     #endif */
 +            /* #endfor */
 +
 +            /* #for I,J in PAIRS_IJ */
 +            /*     #if 'rinvsq' in INTERACTION_FLAGS[I][J] */
 +            /*         # if 'rinv' not in INTERACTION_FLAGS[I][J] */
 +            rinvsq{I}{J}         = gmx_mm256_inv_pd(rsq{I}{J});
 +            /*             #define INNERFLOPS INNERFLOPS+4 */
 +            /*         #else */
 +            rinvsq{I}{J}         = _mm256_mul_pd(rinv{I}{J},rinv{I}{J});
 +            /*             #define INNERFLOPS INNERFLOPS+1 */
 +            /*         #endif */
 +            /*     #endif */
 +            /* #endfor */
 +
 +            /* #if not 'Water' in GEOMETRY_J */
 +            /* Load parameters for j particles */
 +            /*     #for J in PARTICLES_ELEC_J */
 +            jq{J}              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+{J},charge+jnrB+{J},
 +                                                                 charge+jnrC+{J},charge+jnrD+{J});
 +            /*         #if KERNEL_ELEC=='GeneralizedBorn' */
 +            isaj{J}            = gmx_mm256_load_4real_swizzle_pd(invsqrta+jnrA+{J},invsqrta+jnrB+{J},
 +                                                                 invsqrta+jnrC+{J},invsqrta+jnrD+{J});
 +            /*         #endif */
 +            /*     #endfor */
 +            /*     #for J in PARTICLES_VDW_J */
 +            vdwjidx{J}A        = 2*vdwtype[jnrA+{J}];
 +            vdwjidx{J}B        = 2*vdwtype[jnrB+{J}];
 +            vdwjidx{J}C        = 2*vdwtype[jnrC+{J}];
 +            vdwjidx{J}D        = 2*vdwtype[jnrD+{J}];
 +            /*     #endfor */
 +            /* #endif */
 +
 +            /* #if 'Force' in KERNEL_VF and not 'Particle' in GEOMETRY_I */
 +            /*     #for J in PARTICLES_J */
 +            fjx{J}             = _mm256_setzero_pd();
 +            fjy{J}             = _mm256_setzero_pd();
 +            fjz{J}             = _mm256_setzero_pd();
 +            /*     #endfor */
 +            /* #endif */
 +
 +            /* #for I,J in PAIRS_IJ */
 +
 +            /**************************
 +             * CALCULATE INTERACTIONS *
 +             **************************/
 +
 +            /*     ## Note special check for TIP4P-TIP4P. Since we are cutting of all hydrogen interactions we also cut the LJ-only O-O interaction */
 +            /*     #if 'exactcutoff' in INTERACTION_FLAGS[I][J] or (GEOMETRY_I=='Water4' and GEOMETRY_J=='Water4' and 'exactcutoff' in INTERACTION_FLAGS[1][1]) */
 +            /*         ## We always calculate rinv/rinvsq above to enable pipelineing in compilers (performance tested on x86) */
 +            if (gmx_mm256_any_lt(rsq{I}{J},rcutoff2))
 +            {
 +                /*     #if 0    ## this and the next two lines is a hack to maintain auto-indentation in template file */
 +            }
 +            /*         #endif */
 +            /*         #define INNERFLOPS INNERFLOPS+1 */
 +            /*     #endif */
 +
 +            /*     #if 'r' in INTERACTION_FLAGS[I][J] */
 +            r{I}{J}              = _mm256_mul_pd(rsq{I}{J},rinv{I}{J});
 +            /*         #if ROUND == 'Epilogue' */
 +            r{I}{J}              = _mm256_andnot_pd(dummy_mask,r{I}{J});
 +            /*             #define INNERFLOPS INNERFLOPS+1 */
 +            /*         #endif */
 +            /*         #define INNERFLOPS INNERFLOPS+1 */
 +            /*     #endif */
 +
 +            /*     ## For water geometries we already loaded parameters at the start of the kernel */
 +            /*     #if not 'Water' in GEOMETRY_J */
 +            /* Compute parameters for interactions between i and j atoms */
 +            /*         #if 'electrostatics' in INTERACTION_FLAGS[I][J] */
 +            qq{I}{J}             = _mm256_mul_pd(iq{I},jq{J});
 +            /*             #define INNERFLOPS INNERFLOPS+1 */
 +            /*         #endif */
 +            /*         #if 'vdw' in INTERACTION_FLAGS[I][J] */
 +            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr{I}+vdwjidx{J}A,
 +                                            vdwioffsetptr{I}+vdwjidx{J}B,
 +                                            vdwioffsetptr{I}+vdwjidx{J}C,
 +                                            vdwioffsetptr{I}+vdwjidx{J}D,
 +                                            &c6_{I}{J},&c12_{I}{J});
 +            /*         #endif */
 +            /*     #endif */
 +
 +            /*     #if 'table' in INTERACTION_FLAGS[I][J] */
 +            /* Calculate table index by multiplying r with table scale and truncate to integer */
 +            rt               = _mm256_mul_pd(r{I}{J},vftabscale);
 +            vfitab           = _mm256_cvttpd_epi32(rt);
 +            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            /*         #define INNERFLOPS INNERFLOPS+4                          */
 +            /*         #if 'Table' in KERNEL_ELEC and 'Table' in KERNEL_VDW     */
 +            /*             ## 3 tables, 4 bytes per point: multiply index by 12 */
 +            vfitab           = _mm_slli_epi32(_mm_add_epi32(vfitab,_mm_slli_epi32(vfitab,1)),2);
 +            /*         #elif 'Table' in KERNEL_ELEC                             */
 +            /*             ## 1 table, 4 bytes per point: multiply index by 4   */
 +            vfitab           = _mm_slli_epi32(vfitab,2);
 +            /*         #elif 'Table' in KERNEL_VDW                              */
 +            /*             ## 2 tables, 4 bytes per point: multiply index by 8  */
 +            vfitab           = _mm_slli_epi32(vfitab,3);
 +            /*         #endif                                                   */
 +            /*     #endif */
 +
 +            /*     ## ELECTROSTATIC INTERACTIONS */
 +            /*     #if 'electrostatics' in INTERACTION_FLAGS[I][J] */
 +
 +            /*         #if KERNEL_ELEC=='Coulomb' */
 +
 +            /* COULOMB ELECTROSTATICS */
 +            velec            = _mm256_mul_pd(qq{I}{J},rinv{I}{J});
 +            /*             #define INNERFLOPS INNERFLOPS+1 */
 +            /*             #if 'Force' in KERNEL_VF */
 +            felec            = _mm256_mul_pd(velec,rinvsq{I}{J});
 +            /*                 #define INNERFLOPS INNERFLOPS+1 */
 +            /*             #endif */
 +
 +            /*         #elif KERNEL_ELEC=='ReactionField' */
 +
 +            /* REACTION-FIELD ELECTROSTATICS */
 +            /*             #if 'Potential' in KERNEL_VF */
 +            velec            = _mm256_mul_pd(qq{I}{J},_mm256_sub_pd(_mm256_add_pd(rinv{I}{J},_mm256_mul_pd(krf,rsq{I}{J})),crf));
 +            /*                 #define INNERFLOPS INNERFLOPS+4 */
 +            /*             #endif */
 +            /*             #if 'Force' in KERNEL_VF */
 +            felec            = _mm256_mul_pd(qq{I}{J},_mm256_sub_pd(_mm256_mul_pd(rinv{I}{J},rinvsq{I}{J}),krf2));
 +            /*                 #define INNERFLOPS INNERFLOPS+3 */
 +            /*             #endif */
 +
 +            /*         #elif KERNEL_ELEC=='GeneralizedBorn' */
 +
 +            /* GENERALIZED BORN AND COULOMB ELECTROSTATICS */
 +            isaprod          = _mm256_mul_pd(isai{I},isaj{J});
 +            gbqqfactor       = _mm256_xor_pd(signbit,_mm256_mul_pd(qq{I}{J},_mm256_mul_pd(isaprod,gbinvepsdiff)));
 +            gbscale          = _mm256_mul_pd(isaprod,gbtabscale);
 +            /*             #define INNERFLOPS INNERFLOPS+5 */
 +
 +            /* Calculate generalized born table index - this is a separate table from the normal one,
 +             * but we use the same procedure by multiplying r with scale and truncating to integer.
 +             */
 +            rt               = _mm256_mul_pd(r{I}{J},gbscale);
 +            gbitab           = _mm256_cvttpd_epi32(rt);
 +            gbeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
 +            gbitab           = _mm_slli_epi32(gbitab,2);
 +            Y                = _mm256_load_pd( gbtab + _mm_extract_epi32(gbitab,0) );
 +            F                = _mm256_load_pd( gbtab + _mm_extract_epi32(gbitab,1) );
 +            G                = _mm256_load_pd( gbtab + _mm_extract_epi32(gbitab,2) );
 +            H                = _mm256_load_pd( gbtab + _mm_extract_epi32(gbitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(gbeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(gbeps,_mm256_add_pd(G,Heps)));
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(gbeps,Fp));
 +            vgb              = _mm256_mul_pd(gbqqfactor,VV);
 +            /*             #define INNERFLOPS INNERFLOPS+10 */
 +
 +            /*             #if 'Force' in KERNEL_VF */
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(gbeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fgb              = _mm256_mul_pd(gbqqfactor,_mm256_mul_pd(FF,gbscale));
 +            dvdatmp          = _mm256_mul_pd(minushalf,_mm256_add_pd(vgb,_mm256_mul_pd(fgb,r{I}{J})));
 +            dvdasum          = _mm256_add_pd(dvdasum,dvdatmp);
 +            /*                 #if ROUND == 'Loop' */
 +            fjptrA           = dvda+jnrA;
 +            fjptrB           = dvda+jnrB;
 +            fjptrC           = dvda+jnrC;
 +            fjptrD           = dvda+jnrD;
 +            /*                 #else */
 +            /* The pointers to scratch make sure that this code with compilers that take gmx_restrict seriously (e.g. icc 13) really can't screw things up. */
 +            fjptrA             = (jnrlistA>=0) ? dvda+jnrA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? dvda+jnrB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? dvda+jnrC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? dvda+jnrD : scratch;
 +            /*                 #endif */
 +            gmx_mm256_increment_4real_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                 _mm256_mul_pd(dvdatmp,_mm256_mul_pd(isaj{J},isaj{J})));
 +            /*                 #define INNERFLOPS INNERFLOPS+12 */
 +            /*             #endif */
 +            velec            = _mm256_mul_pd(qq{I}{J},rinv{I}{J});
 +            /*                 #define INNERFLOPS INNERFLOPS+1 */
 +            /*             #if 'Force' in KERNEL_VF */
 +            felec            = _mm256_mul_pd(_mm256_sub_pd(_mm256_mul_pd(velec,rinv{I}{J}),fgb),rinv{I}{J});
 +            /*                 #define INNERFLOPS INNERFLOPS+3 */
 +            /*             #endif */
 +
 +            /*         #elif KERNEL_ELEC=='Ewald' */
 +            /* EWALD ELECTROSTATICS */
 +
 +            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
 +            ewrt             = _mm256_mul_pd(r{I}{J},ewtabscale);
 +            ewitab           = _mm256_cvttpd_epi32(ewrt);
 +            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
 +            /*             #define INNERFLOPS INNERFLOPS+4 */
 +            /*             #if 'Potential' in KERNEL_VF or KERNEL_MOD_ELEC=='PotentialSwitch' */
 +            ewitab           = _mm_slli_epi32(ewitab,2);
 +            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
 +            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
 +            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
 +            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
 +            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
 +            /*                 #define INNERFLOPS INNERFLOPS+2 */
 +            /*                 #if KERNEL_MOD_ELEC=='PotentialShift' */
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq{I}{J},_mm256_sub_pd(_mm256_sub_pd(rinv{I}{J},sh_ewald),velec));
 +            /*                     #define INNERFLOPS INNERFLOPS+7 */
 +            /*                 #else */
 +            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
 +            velec            = _mm256_mul_pd(qq{I}{J},_mm256_sub_pd(rinv{I}{J},velec));
 +            /*                     #define INNERFLOPS INNERFLOPS+6 */
 +            /*                 #endif */
 +            /*                 #if 'Force' in KERNEL_VF */
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq{I}{J},rinv{I}{J}),_mm256_sub_pd(rinvsq{I}{J},felec));
 +            /*                      #define INNERFLOPS INNERFLOPS+3 */
 +            /*                 #endif */
 +            /*             #elif KERNEL_VF=='Force' */
 +            gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
 +                                            ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
 +                                            &ewtabF,&ewtabFn);
 +            felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
 +            felec            = _mm256_mul_pd(_mm256_mul_pd(qq{I}{J},rinv{I}{J}),_mm256_sub_pd(rinvsq{I}{J},felec));
 +            /*                 #define INNERFLOPS INNERFLOPS+7 */
 +            /*             #endif */
 +
 +            /*         #elif KERNEL_ELEC=='CubicSplineTable' */
 +
 +            /* CUBIC SPLINE TABLE ELECTROSTATICS */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            /*             #define INNERFLOPS INNERFLOPS+4 */
 +            /*             #if 'Potential' in KERNEL_VF */
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            velec            = _mm256_mul_pd(qq{I}{J},VV);
 +            /*                 #define INNERFLOPS INNERFLOPS+3 */
 +            /*             #endif */
 +            /*             #if 'Force' in KERNEL_VF */
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            felec            = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq{I}{J},FF),_mm256_mul_pd(vftabscale,rinv{I}{J})));
 +            /*                 #define INNERFLOPS INNERFLOPS+7 */
 +            /*             #endif */
 +            /*         #endif */
 +            /*         ## End of check for electrostatics interaction forms */
 +            /*     #endif */
 +            /*     ## END OF ELECTROSTATIC INTERACTION CHECK FOR PAIR I-J */
 +
 +            /*     #if 'vdw' in INTERACTION_FLAGS[I][J] */
 +
 +            /*         #if KERNEL_VDW=='LennardJones' */
 +
 +            /* LENNARD-JONES DISPERSION/REPULSION */
 +
 +            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq{I}{J},rinvsq{I}{J}),rinvsq{I}{J});
 +            /*             #define INNERFLOPS INNERFLOPS+2 */
 +            /*             #if 'Potential' in KERNEL_VF or KERNEL_MOD_VDW=='PotentialSwitch' */
 +            vvdw6            = _mm256_mul_pd(c6_{I}{J},rinvsix);
 +            vvdw12           = _mm256_mul_pd(c12_{I}{J},_mm256_mul_pd(rinvsix,rinvsix));
 +            /*                 #define INNERFLOPS INNERFLOPS+3 */
 +            /*                 #if KERNEL_MOD_VDW=='PotentialShift' */
 +            vvdw             = _mm256_sub_pd(_mm256_mul_pd( _mm256_sub_pd(vvdw12 , _mm256_mul_pd(c12_{I}{J},_mm256_mul_pd(sh_vdw_invrcut6,sh_vdw_invrcut6))), one_twelfth) ,
 +                                          _mm256_mul_pd( _mm256_sub_pd(vvdw6,_mm256_mul_pd(c6_{I}{J},sh_vdw_invrcut6)),one_sixth));
 +            /*                     #define INNERFLOPS INNERFLOPS+8 */
 +            /*                 #else */
 +            vvdw             = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
 +            /*                     #define INNERFLOPS INNERFLOPS+3 */
 +            /*                 #endif */
 +            /*                 ## Check for force inside potential check, i.e. this means we already did the potential part */
 +            /*                 #if 'Force' in KERNEL_VF */
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq{I}{J});
 +            /*                     #define INNERFLOPS INNERFLOPS+2 */
 +            /*                 #endif */
 +            /*             #elif KERNEL_VF=='Force' */
 +            /*                 ## Force-only LennardJones makes it possible to save 1 flop (they do add up...) */
 +            fvdw             = _mm256_mul_pd(_mm256_sub_pd(_mm256_mul_pd(c12_{I}{J},rinvsix),c6_{I}{J}),_mm256_mul_pd(rinvsix,rinvsq{I}{J}));
 +            /*                 #define INNERFLOPS INNERFLOPS+4 */
 +            /*             #endif */
 +
 +            /*         #elif KERNEL_VDW=='CubicSplineTable' */
 +
 +            /* CUBIC SPLINE TABLE DISPERSION */
 +            /*             #if 'Table' in KERNEL_ELEC */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            /*             #endif                     */
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            /*             #define INNERFLOPS INNERFLOPS+4 */
 +            /*             #if 'Potential' in KERNEL_VF */
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw6            = _mm256_mul_pd(c6_{I}{J},VV);
 +            /*                 #define INNERFLOPS INNERFLOPS+3 */
 +            /*             #endif */
 +            /*             #if 'Force' in KERNEL_VF */
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw6            = _mm256_mul_pd(c6_{I}{J},FF);
 +            /*                 #define INNERFLOPS INNERFLOPS+4 */
 +            /*             #endif */
 +
 +            /* CUBIC SPLINE TABLE REPULSION */
 +            vfitab           = _mm_add_epi32(vfitab,ifour);
 +            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
 +            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
 +            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
 +            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
 +            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
 +            Heps             = _mm256_mul_pd(vfeps,H);
 +            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
 +            /*             #define INNERFLOPS INNERFLOPS+4 */
 +            /*             #if 'Potential' in KERNEL_VF */
 +            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
 +            vvdw12           = _mm256_mul_pd(c12_{I}{J},VV);
 +            /*                 #define INNERFLOPS INNERFLOPS+3 */
 +            /*             #endif */
 +            /*             #if 'Force' in KERNEL_VF */
 +            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
 +            fvdw12           = _mm256_mul_pd(c12_{I}{J},FF);
 +            /*                 #define INNERFLOPS INNERFLOPS+5 */
 +            /*             #endif */
 +            /*             #if 'Potential' in KERNEL_VF */
 +            vvdw             = _mm256_add_pd(vvdw12,vvdw6);
 +            /*                 #define INNERFLOPS INNERFLOPS+1 */
 +            /*             #endif */
 +            /*             #if 'Force' in KERNEL_VF */
 +            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv{I}{J})));
 +            /*                 #define INNERFLOPS INNERFLOPS+4 */
 +            /*             #endif */
 +            /*         #endif */
 +            /*         ## End of check for vdw interaction forms */
 +            /*     #endif */
 +            /*     ## END OF VDW INTERACTION CHECK FOR PAIR I-J */
 +
 +            /*     #if 'switch' in INTERACTION_FLAGS[I][J] */
 +            d                = _mm256_sub_pd(r{I}{J},rswitch);
 +            d                = _mm256_max_pd(d,_mm256_setzero_pd());
 +            d2               = _mm256_mul_pd(d,d);
 +            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
 +            /*         #define INNERFLOPS INNERFLOPS+10 */
 +
 +            /*         #if 'Force' in KERNEL_VF */
 +            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
 +            /*             #define INNERFLOPS INNERFLOPS+5 */
 +            /*         #endif */
 +
 +            /* Evaluate switch function */
 +            /*         #if 'Force' in KERNEL_VF */
 +            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
 +            /*             #if 'electrostatics' in INTERACTION_FLAGS[I][J] and KERNEL_MOD_ELEC=='PotentialSwitch' */
 +            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv{I}{J},_mm256_mul_pd(velec,dsw)) );
 +            /*                 #define INNERFLOPS INNERFLOPS+4 */
 +            /*             #endif */
 +            /*             #if 'vdw' in INTERACTION_FLAGS[I][J] and KERNEL_MOD_VDW=='PotentialSwitch' */
 +            fvdw             = _mm256_sub_pd( _mm256_mul_pd(fvdw,sw) , _mm256_mul_pd(rinv{I}{J},_mm256_mul_pd(vvdw,dsw)) );
 +            /*                 #define INNERFLOPS INNERFLOPS+4 */
 +            /*             #endif */
 +            /*         #endif */
 +            /*         #if 'Potential' in KERNEL_VF */
 +            /*             #if 'electrostatics' in INTERACTION_FLAGS[I][J] and KERNEL_MOD_ELEC=='PotentialSwitch' */
 +            velec            = _mm256_mul_pd(velec,sw);
 +            /*                 #define INNERFLOPS INNERFLOPS+1 */
 +            /*             #endif */
 +            /*             #if 'vdw' in INTERACTION_FLAGS[I][J] and KERNEL_MOD_VDW=='PotentialSwitch' */
 +            vvdw             = _mm256_mul_pd(vvdw,sw);
 +            /*                 #define INNERFLOPS INNERFLOPS+1 */
 +            /*             #endif */
 +            /*         #endif */
 +            /*     #endif */
 +            /*     ## Note special check for TIP4P-TIP4P. Since we are cutting of all hydrogen interactions we also cut the LJ-only O-O interaction */
 +            /*     #if 'exactcutoff' in INTERACTION_FLAGS[I][J] or (GEOMETRY_I=='Water4' and GEOMETRY_J=='Water4' and 'exactcutoff' in INTERACTION_FLAGS[1][1]) */
 +            cutoff_mask      = _mm256_cmp_pd(rsq{I}{J},rcutoff2,_CMP_LT_OQ);
 +            /*         #define INNERFLOPS INNERFLOPS+1 */
 +            /*     #endif */
 +
 +            /*     #if 'Potential' in KERNEL_VF */
 +            /* Update potential sum for this i atom from the interaction with this j atom. */
 +            /*         #if 'electrostatics' in INTERACTION_FLAGS[I][J] */
 +            /*             #if 'exactcutoff' in INTERACTION_FLAGS[I][J] */
 +            velec            = _mm256_and_pd(velec,cutoff_mask);
 +            /*                 #define INNERFLOPS INNERFLOPS+1 */
 +            /*             #endif                                       */
 +            /*             #if ROUND == 'Epilogue' */
 +            velec            = _mm256_andnot_pd(dummy_mask,velec);
 +            /*             #endif */
 +            velecsum         = _mm256_add_pd(velecsum,velec);
 +            /*             #define INNERFLOPS INNERFLOPS+1 */
 +            /*             #if KERNEL_ELEC=='GeneralizedBorn' */
 +            /*             #if 'exactcutoff' in INTERACTION_FLAGS[I][J] */
 +            vgb              = _mm256_and_pd(vgb,cutoff_mask);
 +            /*                 #define INNERFLOPS INNERFLOPS+1 */
 +            /*             #endif                                       */
 +            /*             #if ROUND == 'Epilogue' */
 +            vgb              = _mm256_andnot_pd(dummy_mask,vgb);
 +            /*             #endif */
 +            vgbsum           = _mm256_add_pd(vgbsum,vgb);
 +            /*                 #define INNERFLOPS INNERFLOPS+1 */
 +            /*             #endif */
 +            /*         #endif */
 +            /*         #if 'vdw' in INTERACTION_FLAGS[I][J] */
 +            /*     ## Note special check for TIP4P-TIP4P. Since we are cutting of all hydrogen interactions we also cut the LJ-only O-O interaction */
 +            /*     #if 'exactcutoff' in INTERACTION_FLAGS[I][J] or (GEOMETRY_I=='Water4' and GEOMETRY_J=='Water4' and 'exactcutoff' in INTERACTION_FLAGS[1][1]) */
 +            vvdw             = _mm256_and_pd(vvdw,cutoff_mask);
 +            /*                 #define INNERFLOPS INNERFLOPS+1 */
 +            /*             #endif                                       */
 +            /*             #if ROUND == 'Epilogue' */
 +            vvdw             = _mm256_andnot_pd(dummy_mask,vvdw);
 +            /*             #endif */
 +            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
 +            /*             #define INNERFLOPS INNERFLOPS+1 */
 +            /*         #endif */
 +            /*     #endif */
 +
 +            /*     #if 'Force' in KERNEL_VF */
 +
 +            /*         #if 'electrostatics' in INTERACTION_FLAGS[I][J] and 'vdw' in INTERACTION_FLAGS[I][J] */
 +            fscal            = _mm256_add_pd(felec,fvdw);
 +            /*             #define INNERFLOPS INNERFLOPS+1 */
 +            /*         #elif 'electrostatics' in INTERACTION_FLAGS[I][J] */
 +            fscal            = felec;
 +            /*         #elif 'vdw' in INTERACTION_FLAGS[I][J] */
 +            fscal            = fvdw;
 +            /*        #endif */
 +
 +            /*     ## Note special check for TIP4P-TIP4P. Since we are cutting of all hydrogen interactions we also cut the LJ-only O-O interaction */
 +            /*     #if 'exactcutoff' in INTERACTION_FLAGS[I][J] or (GEOMETRY_I=='Water4' and GEOMETRY_J=='Water4' and 'exactcutoff' in INTERACTION_FLAGS[1][1]) */
 +            fscal            = _mm256_and_pd(fscal,cutoff_mask);
 +            /*                 #define INNERFLOPS INNERFLOPS+1 */
 +            /*             #endif                                       */
 +
 +            /*             #if ROUND == 'Epilogue' */
 +            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
 +            /*             #endif */
 +
 +            /* Calculate temporary vectorial force */
 +            tx               = _mm256_mul_pd(fscal,dx{I}{J});
 +            ty               = _mm256_mul_pd(fscal,dy{I}{J});
 +            tz               = _mm256_mul_pd(fscal,dz{I}{J});
 +
 +            /* Update vectorial force */
 +            fix{I}             = _mm256_add_pd(fix{I},tx);
 +            fiy{I}             = _mm256_add_pd(fiy{I},ty);
 +            fiz{I}             = _mm256_add_pd(fiz{I},tz);
 +            /*             #define INNERFLOPS INNERFLOPS+6 */
 +
 +            /* #if GEOMETRY_I == 'Particle'             */
 +            /*     #if ROUND == 'Loop' */
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +            /*     #else */
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +            /*     #endif */
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
 +            /*     #define INNERFLOPS INNERFLOPS+3      */
 +            /* #else                                    */
 +            fjx{J}             = _mm256_add_pd(fjx{J},tx);
 +            fjy{J}             = _mm256_add_pd(fjy{J},ty);
 +            fjz{J}             = _mm256_add_pd(fjz{J},tz);
 +            /*     #define INNERFLOPS INNERFLOPS+3      */
 +            /* #endif                                   */
 +
 +            /*     #endif */
 +
 +            /*     ## Note special check for TIP4P-TIP4P. Since we are cutting of all hydrogen interactions we also cut the LJ-only O-O interaction */
 +            /*     #if 'exactcutoff' in INTERACTION_FLAGS[I][J] or (GEOMETRY_I=='Water4' and GEOMETRY_J=='Water4' and 'exactcutoff' in INTERACTION_FLAGS[1][1]) */
 +            /*         #if 0    ## This and next two lines is a hack to maintain indentation in template file */
 +            {
 +                /*     #endif */
 +            }
 +            /*     #endif */
 +            /*    ## End of check for the interaction being outside the cutoff */
 +
 +            /* #endfor */
 +            /* ## End of loop over i-j interaction pairs */
 +
 +            /* #if GEOMETRY_I != 'Particle' */
 +            /*     #if ROUND == 'Loop' */
 +            fjptrA             = f+j_coord_offsetA;
 +            fjptrB             = f+j_coord_offsetB;
 +            fjptrC             = f+j_coord_offsetC;
 +            fjptrD             = f+j_coord_offsetD;
 +            /*     #else */
 +            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
 +            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
 +            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
 +            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
 +            /*     #endif */
 +            /* #endif */
 +
 +            /* #if 'Water' in GEOMETRY_I and GEOMETRY_J == 'Particle' */
 +            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
 +            /*     #define INNERFLOPS INNERFLOPS+3      */
 +            /* #elif GEOMETRY_J == 'Water3'             */
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
 +            /*     #define INNERFLOPS INNERFLOPS+9      */
 +            /* #elif GEOMETRY_J == 'Water4'             */
 +            /*     #if 0 in PARTICLES_J                 */
 +            gmx_mm256_decrement_4rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
 +                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
 +                                                      fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
 +            /*     #define INNERFLOPS INNERFLOPS+12     */
 +            /*     #else                                */
 +            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA+DIM,fjptrB+DIM,fjptrC+DIM,fjptrD+DIM,
 +                                                      fjx1,fjy1,fjz1,fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
 +            /*     #define INNERFLOPS INNERFLOPS+9      */
 +            /*     #endif                               */
 +            /* #endif                                   */
 +
 +            /* Inner loop uses {INNERFLOPS} flops */
 +        }
 +
 +        /* #endfor */
 +
 +        /* End of innermost loop */
 +
 +        /* #if 'Force' in KERNEL_VF */
 +        /*     #if GEOMETRY_I == 'Particle'            */
 +        gmx_mm256_update_iforce_1atom_swizzle_pd(fix0,fiy0,fiz0,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +        /*         #define OUTERFLOPS OUTERFLOPS+6     */
 +        /*     #elif GEOMETRY_I == 'Water3'            */
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +        /*         #define OUTERFLOPS OUTERFLOPS+18    */
 +        /*     #elif GEOMETRY_I == 'Water4'            */
 +        /*         #if 0 in PARTICLES_I                */
 +        gmx_mm256_update_iforce_4atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
 +                                                 f+i_coord_offset,fshift+i_shift_offset);
 +        /*             #define OUTERFLOPS OUTERFLOPS+24    */
 +        /*         #else                               */
 +        gmx_mm256_update_iforce_3atom_swizzle_pd(fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
 +                                                 f+i_coord_offset+DIM,fshift+i_shift_offset);
 +        /*             #define OUTERFLOPS OUTERFLOPS+18    */
 +        /*         #endif                              */
 +        /*     #endif                                  */
 +        /* #endif                                      */
 +
 +        /* #if 'Potential' in KERNEL_VF */
 +        ggid                        = gid[iidx];
 +        /* Update potential energies */
 +        /*     #if KERNEL_ELEC != 'None' */
 +        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
 +        /*         #define OUTERFLOPS OUTERFLOPS+1 */
 +        /*     #endif */
 +        /*     #if 'GeneralizedBorn' in KERNEL_ELEC */
 +        gmx_mm256_update_1pot_pd(vgbsum,kernel_data->energygrp_polarization+ggid);
 +        /*         #define OUTERFLOPS OUTERFLOPS+1 */
 +        /*     #endif */
 +        /*     #if KERNEL_VDW != 'None' */
 +        gmx_mm256_update_1pot_pd(vvdwsum,kernel_data->energygrp_vdw+ggid);
 +        /*         #define OUTERFLOPS OUTERFLOPS+1 */
 +        /*     #endif */
 +        /* #endif */
 +        /*     #if 'GeneralizedBorn' in KERNEL_ELEC and 'Force' in KERNEL_VF */
 +        dvdasum = _mm256_mul_pd(dvdasum, _mm256_mul_pd(isai{I},isai{I}));
 +        gmx_mm256_update_1pot_pd(dvdasum,dvda+inr);
 +        /*     #endif */
 +
 +        /* Increment number of inner iterations */
 +        inneriter                  += j_index_end - j_index_start;
 +
 +        /* Outer loop uses {OUTERFLOPS} flops */
 +    }
 +
 +    /* Increment number of outer iterations */
 +    outeriter        += nri;
 +
 +    /* Update outer/inner flops */
 +    /* ## NB: This is not important, it just affects the flopcount. However, since our preprocessor is */
 +    /* ## primitive and replaces aggressively even in strings inside these directives, we need to      */
 +    /* ## assemble the main part of the name (containing KERNEL/ELEC/VDW) directly in the source.      */
 +    /* #if GEOMETRY_I == 'Water3'            */
 +    /*     #define ISUFFIX '_W3'             */
 +    /* #elif GEOMETRY_I == 'Water4'          */
 +    /*     #define ISUFFIX '_W4'             */
 +    /* #else                                 */
 +    /*     #define ISUFFIX ''                */
 +    /* #endif                                */
 +    /* #if GEOMETRY_J == 'Water3'            */
 +    /*     #define JSUFFIX 'W3'              */
 +    /* #elif GEOMETRY_J == 'Water4'          */
 +    /*     #define JSUFFIX 'W4'              */
 +    /* #else                                 */
 +    /*     #define JSUFFIX ''                */
 +    /* #endif                                */
 +    /* #if 'PotentialAndForce' in KERNEL_VF  */
 +    /*     #define VFSUFFIX  '_VF'           */
 +    /* #elif 'Potential' in KERNEL_VF        */
 +    /*     #define VFSUFFIX '_V'             */
 +    /* #else                                 */
 +    /*     #define VFSUFFIX '_F'             */
 +    /* #endif                                */
 +
 +    /* #if KERNEL_ELEC != 'None' and KERNEL_VDW != 'None' */
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW{ISUFFIX}{JSUFFIX}{VFSUFFIX},outeriter*{OUTERFLOPS} + inneriter*{INNERFLOPS});
 +    /* #elif KERNEL_ELEC != 'None' */
 +    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC{ISUFFIX}{JSUFFIX}{VFSUFFIX},outeriter*{OUTERFLOPS} + inneriter*{INNERFLOPS});
 +    /* #else */
 +    inc_nrnb(nrnb,eNR_NBKERNEL_VDW{ISUFFIX}{JSUFFIX}{VFSUFFIX},outeriter*{OUTERFLOPS} + inneriter*{INNERFLOPS});
 +    /* #endif  */
 +}
index e0b324f582de5960fbdd5e4d624954808e9edde2,0000000000000000000000000000000000000000..278312182055bc47f5848cedc9de2c41b7c46191
mode 100644,000000..100644
--- /dev/null
@@@ -1,672 -1,0 +1,855 @@@
- #include <math.h> 
 +/*
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2011-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under 
 + * the terms of the GNU Lesser General Public License as published by the Free 
 + * Software Foundation; either version 2 of the License, or (at your option) any 
 + * later version.
 + * As a special exception, you may use this file as part of a free software
 + * library without restriction.  Specifically, if other files instantiate
 + * templates or use macros or inline functions from this file, or you compile
 + * this file and link it with other files to produce an executable, this
 + * file does not by itself cause the resulting executable to be covered by
 + * the GNU Lesser General Public License.  
 + *
 + * In plain-speak: do not worry about classes/macros/templates either - only
 + * changes to the library have to be LGPL, not an application linking with it.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website!
 + */
 +#ifndef _kernelutil_x86_sse2_single_h_
 +#define _kernelutil_x86_sse2_single_h_
 +
 +/* We require SSE2 now! */
 +
-                                          const float * gmx_restrict xyz,
-                                          __m128 * gmx_restrict x1,
-                                          __m128 * gmx_restrict y1,
-                                          __m128 * gmx_restrict z1)
++#include <math.h>
 +
 +#include "gmx_x86_sse2.h"
 +
 +
 +/* Normal sum of four xmm registers */
 +#define gmx_mm_sum4_ps(t0,t1,t2,t3)  _mm_add_ps(_mm_add_ps(t0,t1),_mm_add_ps(t2,t3))
 +
 +static gmx_inline __m128
 +gmx_mm_calc_rsq_ps(__m128 dx, __m128 dy, __m128 dz)
 +{
 +    return _mm_add_ps( _mm_add_ps( _mm_mul_ps(dx,dx), _mm_mul_ps(dy,dy) ), _mm_mul_ps(dz,dz) );
 +}
 +
 +static int
 +gmx_mm_any_lt(__m128 a, __m128 b)
 +{
 +    return _mm_movemask_ps(_mm_cmplt_ps(a,b));
 +}
 +
 +/* Load a single value from 1-4 places, merge into xmm register */
 +
 +static __m128
 +gmx_mm_load_4real_swizzle_ps(const float * gmx_restrict ptrA,
 +                             const float * gmx_restrict ptrB,
 +                             const float * gmx_restrict ptrC,
 +                             const float * gmx_restrict ptrD)
 +{
 +    __m128 t1,t2;
 +
 +    t1 = _mm_unpacklo_ps(_mm_load_ss(ptrA),_mm_load_ss(ptrC));
 +    t2 = _mm_unpacklo_ps(_mm_load_ss(ptrB),_mm_load_ss(ptrD));
 +    return _mm_unpacklo_ps(t1,t2);
 +}
 +
 +static void
 +gmx_mm_store_4real_swizzle_ps(float * gmx_restrict ptrA,
 +                              float * gmx_restrict ptrB,
 +                              float * gmx_restrict ptrC,
 +                              float * gmx_restrict ptrD,
 +                              __m128 xmm1)
 +{
 +    __m128 t2,t3,t4;
 +
 +    t3       = _mm_movehl_ps(_mm_setzero_ps(),xmm1);
 +    t2       = _mm_shuffle_ps(xmm1,xmm1,_MM_SHUFFLE(1,1,1,1));
 +    t4       = _mm_shuffle_ps(t3,t3,_MM_SHUFFLE(1,1,1,1));
 +    _mm_store_ss(ptrA,xmm1);
 +    _mm_store_ss(ptrB,t2);
 +    _mm_store_ss(ptrC,t3);
 +    _mm_store_ss(ptrD,t4);
 +}
 +
 +/* Similar to store, but increments value in memory */
 +static void
 +gmx_mm_increment_4real_swizzle_ps(float * gmx_restrict ptrA,
 +                                  float * gmx_restrict ptrB,
 +                                  float * gmx_restrict ptrC,
 +                                  float * gmx_restrict ptrD, __m128 xmm1)
 +{
 +    __m128 tmp;
 +
 +    tmp = gmx_mm_load_4real_swizzle_ps(ptrA,ptrB,ptrC,ptrD);
 +    tmp = _mm_add_ps(tmp,xmm1);
 +    gmx_mm_store_4real_swizzle_ps(ptrA,ptrB,ptrC,ptrD,tmp);
 +}
 +
 +
 +static void
 +gmx_mm_load_4pair_swizzle_ps(const float * gmx_restrict p1,
 +                             const float * gmx_restrict p2,
 +                             const float * gmx_restrict p3,
 +                             const float * gmx_restrict p4,
 +                             __m128 * gmx_restrict c6,
 +                             __m128 * gmx_restrict c12)
 +{
 +    __m128 t1,t2,t3,t4;
 +
 +    t1   = _mm_loadl_pi(_mm_setzero_ps(),(__m64 *)p1);   /* - - c12a  c6a */
 +    t2   = _mm_loadl_pi(_mm_setzero_ps(),(__m64 *)p2);   /* - - c12b  c6b */
 +    t3   = _mm_loadl_pi(_mm_setzero_ps(),(__m64 *)p3);   /* - - c12c  c6c */
 +    t4   = _mm_loadl_pi(_mm_setzero_ps(),(__m64 *)p4);   /* - - c12d  c6d */
 +    t1   = _mm_unpacklo_ps(t1,t2);
 +    t2   = _mm_unpacklo_ps(t3,t4);
 +    *c6  = _mm_movelh_ps(t1,t2);
 +    *c12 = _mm_movehl_ps(t2,t1);
 +}
 +
 +/* Routines to load 1-4 rvec from 4 places.
 + * We mainly use these to load coordinates. The extra routines
 + * are very efficient for the water-water loops, since we e.g.
 + * know that a TIP4p water has 4 atoms, so we should load 12 floats+shuffle.
 + */
 +
 +
 +static gmx_inline void
 +gmx_mm_load_shift_and_1rvec_broadcast_ps(const float * gmx_restrict xyz_shift,
-     
++        const float * gmx_restrict xyz,
++        __m128 * gmx_restrict x1,
++        __m128 * gmx_restrict y1,
++        __m128 * gmx_restrict z1)
 +{
 +    __m128 t1,t2,t3,t4;
-     
++
 +    t1   = _mm_loadl_pi(_mm_setzero_ps(),(__m64 *)xyz_shift);
 +    t2   = _mm_loadl_pi(_mm_setzero_ps(),(__m64 *)xyz);
 +    t3   = _mm_load_ss(xyz_shift+2);
 +    t4   = _mm_load_ss(xyz+2);
 +    t1   = _mm_add_ps(t1,t2);
 +    t3   = _mm_add_ss(t3,t4);
-                                          const float * gmx_restrict xyz,
-                                          __m128 * gmx_restrict x1, __m128 * gmx_restrict y1, __m128 * gmx_restrict z1,
-                                          __m128 * gmx_restrict x2, __m128 * gmx_restrict y2, __m128 * gmx_restrict z2,
-                                          __m128 * gmx_restrict x3, __m128 * gmx_restrict y3, __m128 * gmx_restrict z3)
++
 +    *x1  = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(0,0,0,0));
 +    *y1  = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(1,1,1,1));
 +    *z1  = _mm_shuffle_ps(t3,t3,_MM_SHUFFLE(0,0,0,0));
 +}
 +
 +
 +static gmx_inline void
 +gmx_mm_load_shift_and_3rvec_broadcast_ps(const float * gmx_restrict xyz_shift,
-     
++        const float * gmx_restrict xyz,
++        __m128 * gmx_restrict x1, __m128 * gmx_restrict y1, __m128 * gmx_restrict z1,
++        __m128 * gmx_restrict x2, __m128 * gmx_restrict y2, __m128 * gmx_restrict z2,
++        __m128 * gmx_restrict x3, __m128 * gmx_restrict y3, __m128 * gmx_restrict z3)
 +{
 +    __m128 tA,tB;
 +    __m128 t1,t2,t3,t4,t5,t6;
-     
++
 +    tA   = _mm_loadl_pi(_mm_setzero_ps(),(__m64 *)xyz_shift);
 +    tB   = _mm_load_ss(xyz_shift+2);
-     
++
 +    t1   = _mm_loadu_ps(xyz);
 +    t2   = _mm_loadu_ps(xyz+4);
 +    t3   = _mm_load_ss(xyz+8);
-     
++
 +    tA   = _mm_movelh_ps(tA,tB);
 +    t4   = _mm_shuffle_ps(tA,tA,_MM_SHUFFLE(0,2,1,0));
 +    t5   = _mm_shuffle_ps(tA,tA,_MM_SHUFFLE(1,0,2,1));
 +    t6   = _mm_shuffle_ps(tA,tA,_MM_SHUFFLE(2,1,0,2));
-     
++
 +    t1   = _mm_add_ps(t1,t4);
 +    t2   = _mm_add_ps(t2,t5);
 +    t3   = _mm_add_ss(t3,t6);
-                                          const float * gmx_restrict xyz,
-                                          __m128 * gmx_restrict x1, __m128 * gmx_restrict y1, __m128 * gmx_restrict z1,
-                                          __m128 * gmx_restrict x2, __m128 * gmx_restrict y2, __m128 * gmx_restrict z2,
-                                          __m128 * gmx_restrict x3, __m128 * gmx_restrict y3, __m128 * gmx_restrict z3,
-                                          __m128 * gmx_restrict x4, __m128 * gmx_restrict y4, __m128 * gmx_restrict z4)
++
 +    *x1  = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(0,0,0,0));
 +    *y1  = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(1,1,1,1));
 +    *z1  = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(2,2,2,2));
 +    *x2  = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(3,3,3,3));
 +    *y2  = _mm_shuffle_ps(t2,t2,_MM_SHUFFLE(0,0,0,0));
 +    *z2  = _mm_shuffle_ps(t2,t2,_MM_SHUFFLE(1,1,1,1));
 +    *x3  = _mm_shuffle_ps(t2,t2,_MM_SHUFFLE(2,2,2,2));
 +    *y3  = _mm_shuffle_ps(t2,t2,_MM_SHUFFLE(3,3,3,3));
 +    *z3  = _mm_shuffle_ps(t3,t3,_MM_SHUFFLE(0,0,0,0));
 +}
 +
 +
 +static gmx_inline void
 +gmx_mm_load_shift_and_4rvec_broadcast_ps(const float * gmx_restrict xyz_shift,
-     
++        const float * gmx_restrict xyz,
++        __m128 * gmx_restrict x1, __m128 * gmx_restrict y1, __m128 * gmx_restrict z1,
++        __m128 * gmx_restrict x2, __m128 * gmx_restrict y2, __m128 * gmx_restrict z2,
++        __m128 * gmx_restrict x3, __m128 * gmx_restrict y3, __m128 * gmx_restrict z3,
++        __m128 * gmx_restrict x4, __m128 * gmx_restrict y4, __m128 * gmx_restrict z4)
 +{
 +    __m128 tA,tB;
 +    __m128 t1,t2,t3,t4,t5,t6;
-     
++
 +    tA   = _mm_castpd_ps(_mm_load_sd((const double *)xyz_shift));
 +    tB   = _mm_load_ss(xyz_shift+2);
-     
++
 +    t1   = _mm_loadu_ps(xyz);
 +    t2   = _mm_loadu_ps(xyz+4);
 +    t3   = _mm_loadu_ps(xyz+8);
-     
++
 +    tA   = _mm_movelh_ps(tA,tB);
 +    t4   = _mm_shuffle_ps(tA,tA,_MM_SHUFFLE(0,2,1,0));
 +    t5   = _mm_shuffle_ps(tA,tA,_MM_SHUFFLE(1,0,2,1));
 +    t6   = _mm_shuffle_ps(tA,tA,_MM_SHUFFLE(2,1,0,2));
-     
++
 +    t1   = _mm_add_ps(t1,t4);
 +    t2   = _mm_add_ps(t2,t5);
 +    t3   = _mm_add_ps(t3,t6);
-                                   __m128 * gmx_restrict x3, __m128 * gmx_restrict y3, __m128 * gmx_restrict z3) 
++
 +    *x1  = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(0,0,0,0));
 +    *y1  = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(1,1,1,1));
 +    *z1  = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(2,2,2,2));
 +    *x2  = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(3,3,3,3));
 +    *y2  = _mm_shuffle_ps(t2,t2,_MM_SHUFFLE(0,0,0,0));
 +    *z2  = _mm_shuffle_ps(t2,t2,_MM_SHUFFLE(1,1,1,1));
 +    *x3  = _mm_shuffle_ps(t2,t2,_MM_SHUFFLE(2,2,2,2));
 +    *y3  = _mm_shuffle_ps(t2,t2,_MM_SHUFFLE(3,3,3,3));
 +    *z3  = _mm_shuffle_ps(t3,t3,_MM_SHUFFLE(0,0,0,0));
 +    *x4  = _mm_shuffle_ps(t3,t3,_MM_SHUFFLE(1,1,1,1));
 +    *y4  = _mm_shuffle_ps(t3,t3,_MM_SHUFFLE(2,2,2,2));
 +    *z4  = _mm_shuffle_ps(t3,t3,_MM_SHUFFLE(3,3,3,3));
 +}
 +
 +
 +static void
 +gmx_mm_load_1rvec_4ptr_swizzle_ps(const float * gmx_restrict ptrA,
 +                                  const float * gmx_restrict ptrB,
 +                                  const float * gmx_restrict ptrC,
 +                                  const float * gmx_restrict ptrD,
 +                                  __m128 *      gmx_restrict x1,
 +                                  __m128 *      gmx_restrict y1,
 +                                  __m128 *      gmx_restrict z1)
 +{
 +    __m128 t1,t2,t3,t4,t5,t6,t7,t8;
 +    t1   = _mm_castpd_ps(_mm_load_sd((const double *)ptrA));
 +    t2   = _mm_castpd_ps(_mm_load_sd((const double *)ptrB));
 +    t3   = _mm_castpd_ps(_mm_load_sd((const double *)ptrC));
 +    t4   = _mm_castpd_ps(_mm_load_sd((const double *)ptrD));
 +    t5 = _mm_load_ss(ptrA+2);
 +    t6 = _mm_load_ss(ptrB+2);
 +    t7 = _mm_load_ss(ptrC+2);
 +    t8 = _mm_load_ss(ptrD+2);
 +    t1 = _mm_unpacklo_ps(t1,t2);
 +    t3 = _mm_unpacklo_ps(t3,t4);
 +    *x1 = _mm_movelh_ps(t1,t3);
 +    *y1 = _mm_movehl_ps(t3,t1);
 +    t5  = _mm_unpacklo_ps(t5,t6);
 +    t7  = _mm_unpacklo_ps(t7,t8);
 +    *z1 = _mm_movelh_ps(t5,t7);
 +}
 +
 +
 +static void
 +gmx_mm_load_3rvec_4ptr_swizzle_ps(const float * gmx_restrict ptrA,
 +                                  const float * gmx_restrict ptrB,
 +                                  const float * gmx_restrict ptrC,
 +                                  const float * gmx_restrict ptrD,
 +                                  __m128 * gmx_restrict x1, __m128 * gmx_restrict y1, __m128 * gmx_restrict z1,
 +                                  __m128 * gmx_restrict x2, __m128 * gmx_restrict y2, __m128 * gmx_restrict z2,
-                                   __m128 * gmx_restrict x4, __m128 * gmx_restrict y4, __m128 * gmx_restrict z4) 
++                                  __m128 * gmx_restrict x3, __m128 * gmx_restrict y3, __m128 * gmx_restrict z3)
 +{
 +    __m128 t1,t2,t3,t4;
 +    t1            = _mm_loadu_ps(ptrA);
 +    t2            = _mm_loadu_ps(ptrB);
 +    t3            = _mm_loadu_ps(ptrC);
 +    t4            = _mm_loadu_ps(ptrD);
 +    _MM_TRANSPOSE4_PS(t1,t2,t3,t4);
 +    *x1           = t1;
 +    *y1           = t2;
 +    *z1           = t3;
 +    *x2           = t4;
 +    t1            = _mm_loadu_ps(ptrA+4);
 +    t2            = _mm_loadu_ps(ptrB+4);
 +    t3            = _mm_loadu_ps(ptrC+4);
 +    t4            = _mm_loadu_ps(ptrD+4);
 +    _MM_TRANSPOSE4_PS(t1,t2,t3,t4);
 +    *y2           = t1;
 +    *z2           = t2;
 +    *x3           = t3;
 +    *y3           = t4;
 +    t1            = _mm_load_ss(ptrA+8);
 +    t2            = _mm_load_ss(ptrB+8);
 +    t3            = _mm_load_ss(ptrC+8);
 +    t4            = _mm_load_ss(ptrD+8);
 +    t1            = _mm_unpacklo_ps(t1,t3);
 +    t3            = _mm_unpacklo_ps(t2,t4);
 +    *z3           = _mm_unpacklo_ps(t1,t3);
 +}
 +
 +
 +static void
 +gmx_mm_load_4rvec_4ptr_swizzle_ps(const float * gmx_restrict ptrA,
 +                                  const float * gmx_restrict ptrB,
 +                                  const float * gmx_restrict ptrC,
 +                                  const float * gmx_restrict ptrD,
 +                                  __m128 * gmx_restrict x1, __m128 * gmx_restrict y1, __m128 * gmx_restrict z1,
 +                                  __m128 * gmx_restrict x2, __m128 * gmx_restrict y2, __m128 * gmx_restrict z2,
 +                                  __m128 * gmx_restrict x3, __m128 * gmx_restrict y3, __m128 * gmx_restrict z3,
-                                        __m128 x3, __m128 y3, __m128 z3) 
++                                  __m128 * gmx_restrict x4, __m128 * gmx_restrict y4, __m128 * gmx_restrict z4)
 +{
 +    __m128 t1,t2,t3,t4;
 +    t1            = _mm_loadu_ps(ptrA);
 +    t2            = _mm_loadu_ps(ptrB);
 +    t3            = _mm_loadu_ps(ptrC);
 +    t4            = _mm_loadu_ps(ptrD);
 +    _MM_TRANSPOSE4_PS(t1,t2,t3,t4);
 +    *x1           = t1;
 +    *y1           = t2;
 +    *z1           = t3;
 +    *x2           = t4;
 +    t1            = _mm_loadu_ps(ptrA+4);
 +    t2            = _mm_loadu_ps(ptrB+4);
 +    t3            = _mm_loadu_ps(ptrC+4);
 +    t4            = _mm_loadu_ps(ptrD+4);
 +    _MM_TRANSPOSE4_PS(t1,t2,t3,t4);
 +    *y2           = t1;
 +    *z2           = t2;
 +    *x3           = t3;
 +    *y3           = t4;
 +    t1            = _mm_loadu_ps(ptrA+8);
 +    t2            = _mm_loadu_ps(ptrB+8);
 +    t3            = _mm_loadu_ps(ptrC+8);
 +    t4            = _mm_loadu_ps(ptrD+8);
 +    _MM_TRANSPOSE4_PS(t1,t2,t3,t4);
 +    *z3           = t1;
 +    *x4           = t2;
 +    *y4           = t3;
 +    *z4           = t4;
 +}
 +
 +
 +static void
 +gmx_mm_decrement_1rvec_4ptr_swizzle_ps(float * gmx_restrict ptrA,
 +                                       float * gmx_restrict ptrB,
 +                                       float * gmx_restrict ptrC,
 +                                       float * gmx_restrict ptrD,
 +                                       __m128 x1, __m128 y1, __m128 z1)
 +{
 +    __m128 t1,t2,t3,t4,t5,t6,t7,t8,t9,t10,t11,t12;
 +    t5          = _mm_unpacklo_ps(y1,z1);
 +    t6          = _mm_unpackhi_ps(y1,z1);
 +    t7          = _mm_shuffle_ps(x1,t5,_MM_SHUFFLE(1,0,0,0));
 +    t8          = _mm_shuffle_ps(x1,t5,_MM_SHUFFLE(3,2,0,1));
 +    t9          = _mm_shuffle_ps(x1,t6,_MM_SHUFFLE(1,0,0,2));
 +    t10         = _mm_shuffle_ps(x1,t6,_MM_SHUFFLE(3,2,0,3));
 +    t1          = _mm_load_ss(ptrA);
 +    t1          = _mm_loadh_pi(t1,(__m64 *)(ptrA+1));
 +    t1          = _mm_sub_ps(t1,t7);
 +    _mm_store_ss(ptrA,t1);
 +    _mm_storeh_pi((__m64 *)(ptrA+1),t1);
 +    t2          = _mm_load_ss(ptrB);
 +    t2          = _mm_loadh_pi(t2,(__m64 *)(ptrB+1));
 +    t2          = _mm_sub_ps(t2,t8);
 +    _mm_store_ss(ptrB,t2);
 +    _mm_storeh_pi((__m64 *)(ptrB+1),t2);
 +    t3          = _mm_load_ss(ptrC);
 +    t3          = _mm_loadh_pi(t3,(__m64 *)(ptrC+1));
 +    t3          = _mm_sub_ps(t3,t9);
 +    _mm_store_ss(ptrC,t3);
 +    _mm_storeh_pi((__m64 *)(ptrC+1),t3);
 +    t4          = _mm_load_ss(ptrD);
 +    t4          = _mm_loadh_pi(t4,(__m64 *)(ptrD+1));
 +    t4          = _mm_sub_ps(t4,t10);
 +    _mm_store_ss(ptrD,t4);
 +    _mm_storeh_pi((__m64 *)(ptrD+1),t4);
 +}
 +
 +
 +
++#if defined (_MSC_VER) && defined(_M_IX86)
++/* Macro work-around since 32-bit MSVC cannot handle >3 xmm/ymm parameters */
++#define gmx_mm_decrement_3rvec_4ptr_swizzle_ps(ptrA,ptrB,ptrC,ptrD, \
++_x1,_y1,_z1,_x2,_y2,_z2,_x3,_y3,_z3) \
++{\
++__m128 _t1,_t2,_t3,_t4,_t5,_t6,_t7,_t8,_t9,_t10;\
++__m128 _t11,_t12,_t13,_t14,_t15,_t16,_t17,_t18,_t19;\
++__m128 _t20,_t21,_t22,_t23,_t24,_t25;\
++_t13         = _mm_unpackhi_ps(_x1,_y1);\
++_x1          = _mm_unpacklo_ps(_x1,_y1);\
++_t14         = _mm_unpackhi_ps(_z1,_x2);\
++_z1          = _mm_unpacklo_ps(_z1,_x2);\
++_t15         = _mm_unpackhi_ps(_y2,_z2);\
++_y2          = _mm_unpacklo_ps(_y2,_z2);\
++_t16         = _mm_unpackhi_ps(_x3,_y3);\
++_x3          = _mm_unpacklo_ps(_x3,_y3);\
++_t17         = _mm_shuffle_ps(_z3,_z3,_MM_SHUFFLE(0,0,0,1));\
++_t18         = _mm_movehl_ps(_z3,_z3);\
++_t19         = _mm_shuffle_ps(_t18,_t18,_MM_SHUFFLE(0,0,0,1));\
++_t20         = _mm_movelh_ps(_x1,_z1);\
++_t21         = _mm_movehl_ps(_z1,_x1);\
++_t22         = _mm_movelh_ps(_t13,_t14);\
++_t14         = _mm_movehl_ps(_t14,_t13);\
++_t23         = _mm_movelh_ps(_y2,_x3);\
++_t24         = _mm_movehl_ps(_x3,_y2);\
++_t25         = _mm_movelh_ps(_t15,_t16);\
++_t16         = _mm_movehl_ps(_t16,_t15);\
++_t1          = _mm_loadu_ps(ptrA);\
++_t2          = _mm_loadu_ps(ptrA+4);\
++_t3          = _mm_load_ss(ptrA+8);\
++_t1          = _mm_sub_ps(_t1,_t20);\
++_t2          = _mm_sub_ps(_t2,_t23);\
++_t3          = _mm_sub_ss(_t3,_z3);\
++_mm_storeu_ps(ptrA,_t1);\
++_mm_storeu_ps(ptrA+4,_t2);\
++_mm_store_ss(ptrA+8,_t3);\
++_t4          = _mm_loadu_ps(ptrB);\
++_t5          = _mm_loadu_ps(ptrB+4);\
++_t6          = _mm_load_ss(ptrB+8);\
++_t4          = _mm_sub_ps(_t4,_t21);\
++_t5          = _mm_sub_ps(_t5,_t24);\
++_t6          = _mm_sub_ss(_t6,_t17);\
++_mm_storeu_ps(ptrB,_t4);\
++_mm_storeu_ps(ptrB+4,_t5);\
++_mm_store_ss(ptrB+8,_t6);\
++_t7          = _mm_loadu_ps(ptrC);\
++_t8          = _mm_loadu_ps(ptrC+4);\
++_t9          = _mm_load_ss(ptrC+8);\
++_t7          = _mm_sub_ps(_t7,_t22);\
++_t8          = _mm_sub_ps(_t8,_t25);\
++_t9          = _mm_sub_ss(_t9,_t18);\
++_mm_storeu_ps(ptrC,_t7);\
++_mm_storeu_ps(ptrC+4,_t8);\
++_mm_store_ss(ptrC+8,_t9);\
++_t10         = _mm_loadu_ps(ptrD);\
++_t11         = _mm_loadu_ps(ptrD+4);\
++_t12         = _mm_load_ss(ptrD+8);\
++_t10         = _mm_sub_ps(_t10,_t14);\
++_t11         = _mm_sub_ps(_t11,_t16);\
++_t12         = _mm_sub_ss(_t12,_t19);\
++_mm_storeu_ps(ptrD,_t10);\
++_mm_storeu_ps(ptrD+4,_t11);\
++_mm_store_ss(ptrD+8,_t12);\
++}
++#else
++/* Real function for sane compilers */
 +static void
 +gmx_mm_decrement_3rvec_4ptr_swizzle_ps(float * gmx_restrict ptrA, float * gmx_restrict ptrB,
 +                                       float * gmx_restrict ptrC, float * gmx_restrict ptrD,
 +                                       __m128 x1, __m128 y1, __m128 z1,
 +                                       __m128 x2, __m128 y2, __m128 z2,
++                                       __m128 x3, __m128 y3, __m128 z3)
 +{
 +    __m128 t1,t2,t3,t4,t5,t6,t7,t8,t9,t10;
 +    __m128 t11,t12,t13,t14,t15,t16,t17,t18,t19;
 +    __m128 t20,t21,t22,t23,t24,t25;
 +
 +    t13         = _mm_unpackhi_ps(x1,y1);
 +    x1          = _mm_unpacklo_ps(x1,y1);
 +    t14         = _mm_unpackhi_ps(z1,x2);
 +    z1          = _mm_unpacklo_ps(z1,x2);
 +    t15         = _mm_unpackhi_ps(y2,z2);
 +    y2          = _mm_unpacklo_ps(y2,z2);
 +    t16         = _mm_unpackhi_ps(x3,y3);
 +    x3          = _mm_unpacklo_ps(x3,y3);
 +    t17         = _mm_shuffle_ps(z3,z3,_MM_SHUFFLE(0,0,0,1));
 +    t18         = _mm_movehl_ps(z3,z3);
 +    t19         = _mm_shuffle_ps(t18,t18,_MM_SHUFFLE(0,0,0,1));
 +    t20         = _mm_movelh_ps(x1,z1);
 +    t21         = _mm_movehl_ps(z1,x1);
 +    t22         = _mm_movelh_ps(t13,t14);
 +    t14         = _mm_movehl_ps(t14,t13);
 +    t23         = _mm_movelh_ps(y2,x3);
 +    t24         = _mm_movehl_ps(x3,y2);
 +    t25         = _mm_movelh_ps(t15,t16);
 +    t16         = _mm_movehl_ps(t16,t15);
 +    t1          = _mm_loadu_ps(ptrA);
 +    t2          = _mm_loadu_ps(ptrA+4);
 +    t3          = _mm_load_ss(ptrA+8);
 +    t1          = _mm_sub_ps(t1,t20);
 +    t2          = _mm_sub_ps(t2,t23);
 +    t3          = _mm_sub_ss(t3,z3);
 +    _mm_storeu_ps(ptrA,t1);
 +    _mm_storeu_ps(ptrA+4,t2);
 +    _mm_store_ss(ptrA+8,t3);
 +    t4          = _mm_loadu_ps(ptrB);
 +    t5          = _mm_loadu_ps(ptrB+4);
 +    t6          = _mm_load_ss(ptrB+8);
 +    t4          = _mm_sub_ps(t4,t21);
 +    t5          = _mm_sub_ps(t5,t24);
 +    t6          = _mm_sub_ss(t6,t17);
 +    _mm_storeu_ps(ptrB,t4);
 +    _mm_storeu_ps(ptrB+4,t5);
 +    _mm_store_ss(ptrB+8,t6);
 +    t7          = _mm_loadu_ps(ptrC);
 +    t8          = _mm_loadu_ps(ptrC+4);
 +    t9          = _mm_load_ss(ptrC+8);
 +    t7          = _mm_sub_ps(t7,t22);
 +    t8          = _mm_sub_ps(t8,t25);
 +    t9          = _mm_sub_ss(t9,t18);
 +    _mm_storeu_ps(ptrC,t7);
 +    _mm_storeu_ps(ptrC+4,t8);
 +    _mm_store_ss(ptrC+8,t9);
 +    t10         = _mm_loadu_ps(ptrD);
 +    t11         = _mm_loadu_ps(ptrD+4);
 +    t12         = _mm_load_ss(ptrD+8);
 +    t10         = _mm_sub_ps(t10,t14);
 +    t11         = _mm_sub_ps(t11,t16);
 +    t12         = _mm_sub_ss(t12,t19);
 +    _mm_storeu_ps(ptrD,t10);
 +    _mm_storeu_ps(ptrD+4,t11);
 +    _mm_store_ss(ptrD+8,t12);
 +}
-                                        __m128 x4, __m128 y4, __m128 z4) 
++#endif
++
++
++#if defined (_MSC_VER) && defined(_M_IX86)
++/* Macro work-around since 32-bit MSVC cannot handle >3 xmm/ymm parameters */
++#define gmx_mm_decrement_4rvec_4ptr_swizzle_ps(ptrA,ptrB,ptrC,ptrD, \
++_x1,_y1,_z1,_x2,_y2,_z2,_x3,_y3,_z3,_x4,_y4,_z4) \
++{\
++__m128 _t1,_t2,_t3,_t4,_t5,_t6,_t7,_t8,_t9,_t10,_t11;\
++__m128 _t12,_t13,_t14,_t15,_t16,_t17,_t18,_t19,_t20,_t21,_t22;\
++__m128 _t23,_t24;\
++_t13         = _mm_unpackhi_ps(_x1,_y1);\
++_x1          = _mm_unpacklo_ps(_x1,_y1);\
++_t14         = _mm_unpackhi_ps(_z1,_x2);\
++_z1          = _mm_unpacklo_ps(_z1,_x2);\
++_t15         = _mm_unpackhi_ps(_y2,_z2);\
++_y2          = _mm_unpacklo_ps(_y2,_z2);\
++_t16         = _mm_unpackhi_ps(_x3,_y3);\
++_x3          = _mm_unpacklo_ps(_x3,_y3);\
++_t17         = _mm_unpackhi_ps(_z3,_x4);\
++_z3          = _mm_unpacklo_ps(_z3,_x4);\
++_t18         = _mm_unpackhi_ps(_y4,_z4);\
++_y4          = _mm_unpacklo_ps(_y4,_z4);\
++_t19         = _mm_movelh_ps(_x1,_z1);\
++_z1          = _mm_movehl_ps(_z1,_x1);\
++_t20         = _mm_movelh_ps(_t13,_t14);\
++_t14         = _mm_movehl_ps(_t14,_t13);\
++_t21         = _mm_movelh_ps(_y2,_x3);\
++_x3          = _mm_movehl_ps(_x3,_y2);\
++_t22         = _mm_movelh_ps(_t15,_t16);\
++_t16         = _mm_movehl_ps(_t16,_t15);\
++_t23         = _mm_movelh_ps(_z3,_y4);\
++_y4          = _mm_movehl_ps(_y4,_z3);\
++_t24         = _mm_movelh_ps(_t17,_t18);\
++_t18         = _mm_movehl_ps(_t18,_t17);\
++_t1          = _mm_loadu_ps(ptrA);\
++_t2          = _mm_loadu_ps(ptrA+4);\
++_t3          = _mm_loadu_ps(ptrA+8);\
++_t1          = _mm_sub_ps(_t1,_t19);\
++_t2          = _mm_sub_ps(_t2,_t21);\
++_t3          = _mm_sub_ps(_t3,_t23);\
++_mm_storeu_ps(ptrA,_t1);\
++_mm_storeu_ps(ptrA+4,_t2);\
++_mm_storeu_ps(ptrA+8,_t3);\
++_t4          = _mm_loadu_ps(ptrB);\
++_t5          = _mm_loadu_ps(ptrB+4);\
++_t6          = _mm_loadu_ps(ptrB+8);\
++_t4          = _mm_sub_ps(_t4,_z1);\
++_t5          = _mm_sub_ps(_t5,_x3);\
++_t6          = _mm_sub_ps(_t6,_y4);\
++_mm_storeu_ps(ptrB,_t4);\
++_mm_storeu_ps(ptrB+4,_t5);\
++_mm_storeu_ps(ptrB+8,_t6);\
++_t7          = _mm_loadu_ps(ptrC);\
++_t8          = _mm_loadu_ps(ptrC+4);\
++_t9          = _mm_loadu_ps(ptrC+8);\
++_t7          = _mm_sub_ps(_t7,_t20);\
++_t8          = _mm_sub_ps(_t8,_t22);\
++_t9          = _mm_sub_ps(_t9,_t24);\
++_mm_storeu_ps(ptrC,_t7);\
++_mm_storeu_ps(ptrC+4,_t8);\
++_mm_storeu_ps(ptrC+8,_t9);\
++_t10         = _mm_loadu_ps(ptrD);\
++_t11         = _mm_loadu_ps(ptrD+4);\
++_t12         = _mm_loadu_ps(ptrD+8);\
++_t10         = _mm_sub_ps(_t10,_t14);\
++_t11         = _mm_sub_ps(_t11,_t16);\
++_t12         = _mm_sub_ps(_t12,_t18);\
++_mm_storeu_ps(ptrD,_t10);\
++_mm_storeu_ps(ptrD+4,_t11);\
++_mm_storeu_ps(ptrD+8,_t12);\
++}
++#else
++/* Real function for sane compilers */
 +static void
 +gmx_mm_decrement_4rvec_4ptr_swizzle_ps(float * gmx_restrict ptrA, float * gmx_restrict ptrB,
 +                                       float * gmx_restrict ptrC, float * gmx_restrict ptrD,
 +                                       __m128 x1, __m128 y1, __m128 z1,
 +                                       __m128 x2, __m128 y2, __m128 z2,
 +                                       __m128 x3, __m128 y3, __m128 z3,
++                                       __m128 x4, __m128 y4, __m128 z4)
 +{
 +    __m128 t1,t2,t3,t4,t5,t6,t7,t8,t9,t10,t11;
 +    __m128 t12,t13,t14,t15,t16,t17,t18,t19,t20,t21,t22;
 +    __m128 t23,t24;
 +    t13         = _mm_unpackhi_ps(x1,y1);
 +    x1          = _mm_unpacklo_ps(x1,y1);
 +    t14         = _mm_unpackhi_ps(z1,x2);
 +    z1          = _mm_unpacklo_ps(z1,x2);
 +    t15         = _mm_unpackhi_ps(y2,z2);
 +    y2          = _mm_unpacklo_ps(y2,z2);
 +    t16         = _mm_unpackhi_ps(x3,y3);
 +    x3          = _mm_unpacklo_ps(x3,y3);
 +    t17         = _mm_unpackhi_ps(z3,x4);
 +    z3          = _mm_unpacklo_ps(z3,x4);
 +    t18         = _mm_unpackhi_ps(y4,z4);
 +    y4          = _mm_unpacklo_ps(y4,z4);
 +    t19         = _mm_movelh_ps(x1,z1);
 +    z1          = _mm_movehl_ps(z1,x1);
 +    t20         = _mm_movelh_ps(t13,t14);
 +    t14         = _mm_movehl_ps(t14,t13);
 +    t21         = _mm_movelh_ps(y2,x3);
 +    x3          = _mm_movehl_ps(x3,y2);
 +    t22         = _mm_movelh_ps(t15,t16);
 +    t16         = _mm_movehl_ps(t16,t15);
 +    t23         = _mm_movelh_ps(z3,y4);
 +    y4          = _mm_movehl_ps(y4,z3);
 +    t24         = _mm_movelh_ps(t17,t18);
 +    t18         = _mm_movehl_ps(t18,t17);
 +    t1          = _mm_loadu_ps(ptrA);
 +    t2          = _mm_loadu_ps(ptrA+4);
 +    t3          = _mm_loadu_ps(ptrA+8);
 +    t1          = _mm_sub_ps(t1,t19);
 +    t2          = _mm_sub_ps(t2,t21);
 +    t3          = _mm_sub_ps(t3,t23);
 +    _mm_storeu_ps(ptrA,t1);
 +    _mm_storeu_ps(ptrA+4,t2);
 +    _mm_storeu_ps(ptrA+8,t3);
 +    t4          = _mm_loadu_ps(ptrB);
 +    t5          = _mm_loadu_ps(ptrB+4);
 +    t6          = _mm_loadu_ps(ptrB+8);
 +    t4          = _mm_sub_ps(t4,z1);
 +    t5          = _mm_sub_ps(t5,x3);
 +    t6          = _mm_sub_ps(t6,y4);
 +    _mm_storeu_ps(ptrB,t4);
 +    _mm_storeu_ps(ptrB+4,t5);
 +    _mm_storeu_ps(ptrB+8,t6);
 +    t7          = _mm_loadu_ps(ptrC);
 +    t8          = _mm_loadu_ps(ptrC+4);
 +    t9          = _mm_loadu_ps(ptrC+8);
 +    t7          = _mm_sub_ps(t7,t20);
 +    t8          = _mm_sub_ps(t8,t22);
 +    t9          = _mm_sub_ps(t9,t24);
 +    _mm_storeu_ps(ptrC,t7);
 +    _mm_storeu_ps(ptrC+4,t8);
 +    _mm_storeu_ps(ptrC+8,t9);
 +    t10         = _mm_loadu_ps(ptrD);
 +    t11         = _mm_loadu_ps(ptrD+4);
 +    t12         = _mm_loadu_ps(ptrD+8);
 +    t10         = _mm_sub_ps(t10,t14);
 +    t11         = _mm_sub_ps(t11,t16);
 +    t12         = _mm_sub_ps(t12,t18);
 +    _mm_storeu_ps(ptrD,t10);
 +    _mm_storeu_ps(ptrD+4,t11);
 +    _mm_storeu_ps(ptrD+8,t12);
 +}
++#endif
 +
 +
 +static gmx_inline void
 +gmx_mm_update_iforce_1atom_swizzle_ps(__m128 fix1, __m128 fiy1, __m128 fiz1,
 +                                      float * gmx_restrict fptr,
 +                                      float * gmx_restrict fshiftptr)
 +{
 +    __m128 t1,t2,t3;
 +
 +    /* transpose data */
 +    t1 = fix1;
 +    _MM_TRANSPOSE4_PS(fix1,t1,fiy1,fiz1);
 +    fix1 = _mm_add_ps(_mm_add_ps(fix1,t1), _mm_add_ps(fiy1,fiz1));
 +
 +    t2 = _mm_load_ss(fptr);
 +    t2 = _mm_loadh_pi(t2,(__m64 *)(fptr+1));
 +    t3 = _mm_load_ss(fshiftptr);
 +    t3 = _mm_loadh_pi(t3,(__m64 *)(fshiftptr+1));
 +
 +    t2 = _mm_add_ps(t2,fix1);
 +    t3 = _mm_add_ps(t3,fix1);
 +
 +    _mm_store_ss(fptr,t2);
 +    _mm_storeh_pi((__m64 *)(fptr+1),t2);
 +    _mm_store_ss(fshiftptr,t3);
 +    _mm_storeh_pi((__m64 *)(fshiftptr+1),t3);
 +}
 +
++#if defined (_MSC_VER) && defined(_M_IX86)
++/* Macro work-around since 32-bit MSVC cannot handle >3 xmm/ymm parameters */
++#define gmx_mm_update_iforce_3atom_swizzle_ps(fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3, \
++                                              fptr,fshiftptr) \
++{\
++    __m128 _t1,_t2,_t3,_t4;\
++\
++    _MM_TRANSPOSE4_PS(fix1,fiy1,fiz1,fix2);\
++    _MM_TRANSPOSE4_PS(fiy2,fiz2,fix3,fiy3);\
++    _t2   = _mm_movehl_ps(_mm_setzero_ps(),fiz3);\
++    _t1   = _mm_shuffle_ps(fiz3,fiz3,_MM_SHUFFLE(0,0,0,1));\
++    _t3   = _mm_shuffle_ps(_t2,_t2,_MM_SHUFFLE(0,0,0,1));\
++    fix1 = _mm_add_ps(_mm_add_ps(fix1,fiy1), _mm_add_ps(fiz1,fix2));\
++    fiy2 = _mm_add_ps(_mm_add_ps(fiy2,fiz2), _mm_add_ps(fix3,fiy3));\
++    fiz3 = _mm_add_ss(_mm_add_ps(fiz3,_t1)  , _mm_add_ps(_t2,_t3));\
++    _mm_storeu_ps(fptr,  _mm_add_ps(fix1,_mm_loadu_ps(fptr)  ));\
++    _mm_storeu_ps(fptr+4,_mm_add_ps(fiy2,_mm_loadu_ps(fptr+4)));\
++    _mm_store_ss (fptr+8,_mm_add_ss(fiz3,_mm_load_ss(fptr+8) ));\
++    _t4 = _mm_load_ss(fshiftptr+2);\
++    _t4 = _mm_loadh_pi(_t4,(__m64 *)(fshiftptr));\
++    _t1 = _mm_shuffle_ps(fiz3,fix1,_MM_SHUFFLE(1,0,0,0));\
++    _t2 = _mm_shuffle_ps(fix1,fiy2,_MM_SHUFFLE(3,2,2,2));\
++    _t3 = _mm_shuffle_ps(fiy2,fix1,_MM_SHUFFLE(3,3,0,1));\
++    _t3 = _mm_shuffle_ps(_t3  ,_t3  ,_MM_SHUFFLE(1,2,0,0));\
++    _t1 = _mm_add_ps(_t1,_t2);\
++    _t3 = _mm_add_ps(_t3,_t4);\
++    _t1 = _mm_add_ps(_t1,_t3);\
++    _mm_store_ss(fshiftptr+2,_t1);\
++    _mm_storeh_pi((__m64 *)(fshiftptr),_t1);\
++}
++#else
++/* Real function for sane compilers */
 +static gmx_inline void
 +gmx_mm_update_iforce_3atom_swizzle_ps(__m128 fix1, __m128 fiy1, __m128 fiz1,
 +                                      __m128 fix2, __m128 fiy2, __m128 fiz2,
 +                                      __m128 fix3, __m128 fiy3, __m128 fiz3,
 +                                      float * gmx_restrict fptr,
 +                                      float * gmx_restrict fshiftptr)
 +{
 +    __m128 t1,t2,t3,t4;
 +
 +    /* transpose data */
 +    _MM_TRANSPOSE4_PS(fix1,fiy1,fiz1,fix2);
 +    _MM_TRANSPOSE4_PS(fiy2,fiz2,fix3,fiy3);
 +    t2   = _mm_movehl_ps(_mm_setzero_ps(),fiz3);
 +    t1   = _mm_shuffle_ps(fiz3,fiz3,_MM_SHUFFLE(0,0,0,1));
 +    t3   = _mm_shuffle_ps(t2,t2,_MM_SHUFFLE(0,0,0,1));
 +
 +    fix1 = _mm_add_ps(_mm_add_ps(fix1,fiy1), _mm_add_ps(fiz1,fix2));
 +    fiy2 = _mm_add_ps(_mm_add_ps(fiy2,fiz2), _mm_add_ps(fix3,fiy3));
 +    fiz3 = _mm_add_ss(_mm_add_ps(fiz3,t1)  , _mm_add_ps(t2,t3));
 +
 +    _mm_storeu_ps(fptr,  _mm_add_ps(fix1,_mm_loadu_ps(fptr)  ));
 +    _mm_storeu_ps(fptr+4,_mm_add_ps(fiy2,_mm_loadu_ps(fptr+4)));
 +    _mm_store_ss (fptr+8,_mm_add_ss(fiz3,_mm_load_ss(fptr+8) ));
 +
 +    t4 = _mm_load_ss(fshiftptr+2);
 +    t4 = _mm_loadh_pi(t4,(__m64 *)(fshiftptr));
 +
 +    t1 = _mm_shuffle_ps(fiz3,fix1,_MM_SHUFFLE(1,0,0,0));   /* fiy1 fix1  -   fiz3 */
 +    t2 = _mm_shuffle_ps(fix1,fiy2,_MM_SHUFFLE(3,2,2,2));   /* fiy3 fix3  -   fiz1 */
 +    t3 = _mm_shuffle_ps(fiy2,fix1,_MM_SHUFFLE(3,3,0,1));   /* fix2 fix2 fiy2 fiz2 */
 +    t3 = _mm_shuffle_ps(t3  ,t3  ,_MM_SHUFFLE(1,2,0,0));   /* fiy2 fix2  -   fiz2 */
 +
 +    t1 = _mm_add_ps(t1,t2);
 +    t3 = _mm_add_ps(t3,t4);
 +    t1 = _mm_add_ps(t1,t3); /* y x - z */
 +
 +    _mm_store_ss(fshiftptr+2,t1);
 +    _mm_storeh_pi((__m64 *)(fshiftptr),t1);
 +}
++#endif
++
++#if defined (_MSC_VER) && defined(_M_IX86)
++/* Macro work-around since 32-bit MSVC cannot handle >3 xmm/ymm parameters */
++#define gmx_mm_update_iforce_4atom_swizzle_ps(fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,fix4,fiy4,fiz4, \
++                                              fptr,fshiftptr) \
++{\
++    __m128 _t1,_t2,_t3,_t4,_t5;\
++    _MM_TRANSPOSE4_PS(fix1,fiy1,fiz1,fix2);\
++    _MM_TRANSPOSE4_PS(fiy2,fiz2,fix3,fiy3);\
++    _MM_TRANSPOSE4_PS(fiz3,fix4,fiy4,fiz4);\
++    fix1 = _mm_add_ps(_mm_add_ps(fix1,fiy1), _mm_add_ps(fiz1,fix2));\
++    fiy2 = _mm_add_ps(_mm_add_ps(fiy2,fiz2), _mm_add_ps(fix3,fiy3));\
++    fiz3 = _mm_add_ps(_mm_add_ps(fiz3,fix4), _mm_add_ps(fiy4,fiz4));\
++    _mm_storeu_ps(fptr,  _mm_add_ps(fix1,_mm_loadu_ps(fptr)  ));\
++    _mm_storeu_ps(fptr+4,_mm_add_ps(fiy2,_mm_loadu_ps(fptr+4)));\
++    _mm_storeu_ps(fptr+8,_mm_add_ps(fiz3,_mm_loadu_ps(fptr+8)));\
++    _t5 = _mm_load_ss(fshiftptr+2);\
++    _t5 = _mm_loadh_pi(_t5,(__m64 *)(fshiftptr));\
++    _t1 = _mm_shuffle_ps(fix1,fix1,_MM_SHUFFLE(1,0,2,2));\
++    _t2 = _mm_shuffle_ps(fiy2,fiy2,_MM_SHUFFLE(3,2,1,1));\
++    _t3 = _mm_shuffle_ps(fiz3,fiz3,_MM_SHUFFLE(2,1,0,0));\
++    _t4 = _mm_shuffle_ps(fix1,fiy2,_MM_SHUFFLE(0,0,3,3));\
++    _t4 = _mm_shuffle_ps(fiz3,_t4  ,_MM_SHUFFLE(2,0,3,3));\
++    _t1 = _mm_add_ps(_t1,_t2);\
++    _t3 = _mm_add_ps(_t3,_t4);\
++    _t1 = _mm_add_ps(_t1,_t3);\
++    _t5 = _mm_add_ps(_t5,_t1);\
++    _mm_store_ss(fshiftptr+2,_t5);\
++    _mm_storeh_pi((__m64 *)(fshiftptr),_t5);\
++}
++#else
++/* Real function for sane compilers */
 +static gmx_inline void
 +gmx_mm_update_iforce_4atom_swizzle_ps(__m128 fix1, __m128 fiy1, __m128 fiz1,
 +                                      __m128 fix2, __m128 fiy2, __m128 fiz2,
 +                                      __m128 fix3, __m128 fiy3, __m128 fiz3,
 +                                      __m128 fix4, __m128 fiy4, __m128 fiz4,
 +                                      float * gmx_restrict fptr,
 +                                      float * gmx_restrict fshiftptr)
 +{
 +    __m128 t1,t2,t3,t4,t5;
 +
 +    /* transpose data */
 +    _MM_TRANSPOSE4_PS(fix1,fiy1,fiz1,fix2);
 +    _MM_TRANSPOSE4_PS(fiy2,fiz2,fix3,fiy3);
 +    _MM_TRANSPOSE4_PS(fiz3,fix4,fiy4,fiz4);
 +
 +    fix1 = _mm_add_ps(_mm_add_ps(fix1,fiy1), _mm_add_ps(fiz1,fix2));
 +    fiy2 = _mm_add_ps(_mm_add_ps(fiy2,fiz2), _mm_add_ps(fix3,fiy3));
 +    fiz3 = _mm_add_ps(_mm_add_ps(fiz3,fix4), _mm_add_ps(fiy4,fiz4));
 +
 +    _mm_storeu_ps(fptr,  _mm_add_ps(fix1,_mm_loadu_ps(fptr)  ));
 +    _mm_storeu_ps(fptr+4,_mm_add_ps(fiy2,_mm_loadu_ps(fptr+4)));
 +    _mm_storeu_ps(fptr+8,_mm_add_ps(fiz3,_mm_loadu_ps(fptr+8)));
 +
 +    t5 = _mm_load_ss(fshiftptr+2);
 +    t5 = _mm_loadh_pi(t5,(__m64 *)(fshiftptr));
 +
 +    t1 = _mm_shuffle_ps(fix1,fix1,_MM_SHUFFLE(1,0,2,2));
 +    t2 = _mm_shuffle_ps(fiy2,fiy2,_MM_SHUFFLE(3,2,1,1));
 +    t3 = _mm_shuffle_ps(fiz3,fiz3,_MM_SHUFFLE(2,1,0,0));
 +    t4 = _mm_shuffle_ps(fix1,fiy2,_MM_SHUFFLE(0,0,3,3));
 +    t4 = _mm_shuffle_ps(fiz3,t4  ,_MM_SHUFFLE(2,0,3,3));
 +
 +    t1 = _mm_add_ps(t1,t2);
 +    t3 = _mm_add_ps(t3,t4);
 +    t1 = _mm_add_ps(t1,t3);
 +    t5 = _mm_add_ps(t5,t1);
 +
 +    _mm_store_ss(fshiftptr+2,t5);
 +    _mm_storeh_pi((__m64 *)(fshiftptr),t5);
 +}
- static void
- gmx_mm_update_4pot_ps(__m128 pot1, float * gmx_restrict ptrA,
-                       __m128 pot2, float * gmx_restrict ptrB,
-                       __m128 pot3, float * gmx_restrict ptrC,
-                       __m128 pot4, float * gmx_restrict ptrD)
- {
-     _MM_TRANSPOSE4_PS(pot1,pot2,pot3,pot4);
-     pot1 = _mm_add_ps(_mm_add_ps(pot1,pot2),_mm_add_ps(pot3,pot4));
-     pot2 = _mm_shuffle_ps(pot1,pot1,_MM_SHUFFLE(1,1,1,1));
-     pot3 = _mm_shuffle_ps(pot1,pot1,_MM_SHUFFLE(2,2,2,2));
-     pot4 = _mm_shuffle_ps(pot1,pot1,_MM_SHUFFLE(3,3,3,3));
-     _mm_store_ss(ptrA,_mm_add_ss(pot1,_mm_load_ss(ptrA)));
-     _mm_store_ss(ptrB,_mm_add_ss(pot2,_mm_load_ss(ptrB)));
-     _mm_store_ss(ptrC,_mm_add_ss(pot3,_mm_load_ss(ptrC)));
-     _mm_store_ss(ptrD,_mm_add_ss(pot4,_mm_load_ss(ptrD)));
- }
++#endif
 +
 +
 +static void
 +gmx_mm_update_1pot_ps(__m128 pot1, float * gmx_restrict ptrA)
 +{
 +    pot1 = _mm_add_ps(pot1,_mm_movehl_ps(_mm_setzero_ps(),pot1));
 +    pot1 = _mm_add_ps(pot1,_mm_shuffle_ps(pot1,pot1,_MM_SHUFFLE(0,0,0,1)));
 +    _mm_store_ss(ptrA,_mm_add_ss(pot1,_mm_load_ss(ptrA)));
 +}
 +
 +static void
 +gmx_mm_update_2pot_ps(__m128 pot1, float * gmx_restrict ptrA,
 +                      __m128 pot2, float * gmx_restrict ptrB)
 +{
 +    __m128 t1,t2;
 +    t1   = _mm_movehl_ps(pot2,pot1);
 +    t2   = _mm_movelh_ps(pot1,pot2);
 +    t1   = _mm_add_ps(t1,t2);
 +    t2   = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(3,3,1,1));
 +    pot1 = _mm_add_ps(t1,t2);
 +    pot2 = _mm_movehl_ps(t2,pot1);
 +    _mm_store_ss(ptrA,_mm_add_ss(pot1,_mm_load_ss(ptrA)));
 +    _mm_store_ss(ptrB,_mm_add_ss(pot2,_mm_load_ss(ptrB)));
 +}
 +
 +
 +#endif /* _kernelutil_x86_sse2_single_h_ */
index cee331911b3f87a7f27ab30f37e4f801a817b4f7,0000000000000000000000000000000000000000..0f19ea8bfb8d8baa4f375461a116b0bb308f69e6
mode 100644,000000..100644
--- /dev/null
@@@ -1,669 -1,0 +1,855 @@@
- #include <math.h> 
 +/*
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 2011-2012, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under 
 + * the terms of the GNU Lesser General Public License as published by the Free 
 + * Software Foundation; either version 2 of the License, or (at your option) any 
 + * later version.
 + * As a special exception, you may use this file as part of a free software
 + * library without restriction.  Specifically, if other files instantiate
 + * templates or use macros or inline functions from this file, or you compile
 + * this file and link it with other files to produce an executable, this
 + * file does not by itself cause the resulting executable to be covered by
 + * the GNU Lesser General Public License.  
 + *
 + * In plain-speak: do not worry about classes/macros/templates either - only
 + * changes to the library have to be LGPL, not an application linking with it.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website!
 + */
 +#ifndef _kernelutil_x86_sse4_1_single_h_
 +#define _kernelutil_x86_sse4_1_single_h_
 +
- #define gmx_restrict 
++#include <math.h>
 +
 +#include "gmx_x86_sse4_1.h"
 +
 +#undef gmx_restrict
-     
++#define gmx_restrict
 +
 +/* Normal sum of four xmm registers */
 +#define gmx_mm_sum4_ps(t0,t1,t2,t3)  _mm_add_ps(_mm_add_ps(t0,t1),_mm_add_ps(t2,t3))
 +
 +static gmx_inline __m128
 +gmx_mm_calc_rsq_ps(__m128 dx, __m128 dy, __m128 dz)
 +{
 +    return _mm_add_ps( _mm_add_ps( _mm_mul_ps(dx,dx), _mm_mul_ps(dy,dy) ), _mm_mul_ps(dz,dz) );
 +}
 +
 +static gmx_inline int
 +gmx_mm_any_lt(__m128 a, __m128 b)
 +{
 +    return _mm_movemask_ps(_mm_cmplt_ps(a,b));
 +}
 +
 +/* Load a single value from 1-4 places, merge into xmm register */
 +
 +static gmx_inline __m128
 +gmx_mm_load_4real_swizzle_ps(const float * gmx_restrict ptrA,
 +                             const float * gmx_restrict ptrB,
 +                             const float * gmx_restrict ptrC,
 +                             const float * gmx_restrict ptrD)
 +{
 +    __m128 t1,t2;
-     
-     t3       = _mm_movehl_ps(_mm_setzero_ps(),xmm1);               
-     t2       = _mm_shuffle_ps(xmm1,xmm1,_MM_SHUFFLE(1,1,1,1));     
-     t4       = _mm_shuffle_ps(t3,t3,_MM_SHUFFLE(1,1,1,1)); 
-     _mm_store_ss(ptrA,xmm1);                                           
-     _mm_store_ss(ptrB,t2);                                         
-     _mm_store_ss(ptrC,t3);                                         
-     _mm_store_ss(ptrD,t4);                                         
++
 +    t1 = _mm_unpacklo_ps(_mm_load_ss(ptrA),_mm_load_ss(ptrC));
 +    t2 = _mm_unpacklo_ps(_mm_load_ss(ptrB),_mm_load_ss(ptrD));
 +    return _mm_unpacklo_ps(t1,t2);
 +}
 +
 +static gmx_inline void
 +gmx_mm_store_4real_swizzle_ps(float * gmx_restrict ptrA,
 +                              float * gmx_restrict ptrB,
 +                              float * gmx_restrict ptrC,
 +                              float * gmx_restrict ptrD,
 +                              __m128 xmm1)
 +{
 +    __m128 t2,t3,t4;
-     
++
++    t3       = _mm_movehl_ps(_mm_setzero_ps(),xmm1);
++    t2       = _mm_shuffle_ps(xmm1,xmm1,_MM_SHUFFLE(1,1,1,1));
++    t4       = _mm_shuffle_ps(t3,t3,_MM_SHUFFLE(1,1,1,1));
++    _mm_store_ss(ptrA,xmm1);
++    _mm_store_ss(ptrB,t2);
++    _mm_store_ss(ptrC,t3);
++    _mm_store_ss(ptrD,t4);
 +}
 +
 +/* Similar to store, but increments value in memory */
 +static gmx_inline void
 +gmx_mm_increment_4real_swizzle_ps(float * gmx_restrict ptrA,
 +                                  float * gmx_restrict ptrB,
 +                                  float * gmx_restrict ptrC,
 +                                  float * gmx_restrict ptrD, __m128 xmm1)
 +{
 +    __m128 tmp;
-     
++
 +    tmp = gmx_mm_load_4real_swizzle_ps(ptrA,ptrB,ptrC,ptrD);
 +    tmp = _mm_add_ps(tmp,xmm1);
 +    gmx_mm_store_4real_swizzle_ps(ptrA,ptrB,ptrC,ptrD,tmp);
 +}
 +
 +
 +static gmx_inline void
 +gmx_mm_load_4pair_swizzle_ps(const float * gmx_restrict p1,
 +                             const float * gmx_restrict p2,
 +                             const float * gmx_restrict p3,
 +                             const float * gmx_restrict p4,
 +                             __m128 * gmx_restrict c6,
 +                             __m128 * gmx_restrict c12)
 +{
 +    __m128 t1,t2,t3,t4;
-                                          const float * gmx_restrict xyz,
-                                          __m128 * gmx_restrict x1,
-                                          __m128 * gmx_restrict y1,
-                                          __m128 * gmx_restrict z1)
++
 +    t1   = _mm_loadl_pi(_mm_setzero_ps(),(__m64 *)p1);   /* - - c12a  c6a */
 +    t2   = _mm_loadl_pi(_mm_setzero_ps(),(__m64 *)p2);   /* - - c12b  c6b */
 +    t3   = _mm_loadl_pi(_mm_setzero_ps(),(__m64 *)p3);   /* - - c12c  c6c */
 +    t4   = _mm_loadl_pi(_mm_setzero_ps(),(__m64 *)p4);   /* - - c12d  c6d */
 +    t1   = _mm_unpacklo_ps(t1,t2);
 +    t2   = _mm_unpacklo_ps(t3,t4);
 +    *c6  = _mm_movelh_ps(t1,t2);
 +    *c12 = _mm_movehl_ps(t2,t1);
 +}
 +
 +
 +static gmx_inline void
 +gmx_mm_load_shift_and_1rvec_broadcast_ps(const float * gmx_restrict xyz_shift,
-     
++        const float * gmx_restrict xyz,
++        __m128 * gmx_restrict x1,
++        __m128 * gmx_restrict y1,
++        __m128 * gmx_restrict z1)
 +{
 +    __m128 t1,t2,t3,t4;
-     
++
 +    t1   = _mm_loadl_pi(_mm_setzero_ps(),(__m64 *)xyz_shift);
 +    t2   = _mm_loadl_pi(_mm_setzero_ps(),(__m64 *)xyz);
 +    t3   = _mm_load_ss(xyz_shift+2);
 +    t4   = _mm_load_ss(xyz+2);
 +    t1   = _mm_add_ps(t1,t2);
 +    t3   = _mm_add_ss(t3,t4);
-                                          const float * gmx_restrict xyz,
-                                          __m128 * gmx_restrict x1, __m128 * gmx_restrict y1, __m128 * gmx_restrict z1,
-                                          __m128 * gmx_restrict x2, __m128 * gmx_restrict y2, __m128 * gmx_restrict z2,
-                                          __m128 * gmx_restrict x3, __m128 * gmx_restrict y3, __m128 * gmx_restrict z3)
++
 +    *x1  = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(0,0,0,0));
 +    *y1  = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(1,1,1,1));
 +    *z1  = _mm_shuffle_ps(t3,t3,_MM_SHUFFLE(0,0,0,0));
 +}
 +
 +
 +static gmx_inline void
 +gmx_mm_load_shift_and_3rvec_broadcast_ps(const float * gmx_restrict xyz_shift,
-     
++        const float * gmx_restrict xyz,
++        __m128 * gmx_restrict x1, __m128 * gmx_restrict y1, __m128 * gmx_restrict z1,
++        __m128 * gmx_restrict x2, __m128 * gmx_restrict y2, __m128 * gmx_restrict z2,
++        __m128 * gmx_restrict x3, __m128 * gmx_restrict y3, __m128 * gmx_restrict z3)
 +{
 +    __m128 tA,tB;
 +    __m128 t1,t2,t3,t4,t5,t6;
-     
++
 +    tA   = _mm_loadl_pi(_mm_setzero_ps(),(__m64 *)xyz_shift);
 +    tB   = _mm_load_ss(xyz_shift+2);
 +
 +    t1   = _mm_loadu_ps(xyz);
 +    t2   = _mm_loadu_ps(xyz+4);
 +    t3   = _mm_load_ss(xyz+8);
 +
 +    tA   = _mm_movelh_ps(tA,tB);
 +    t4   = _mm_shuffle_ps(tA,tA,_MM_SHUFFLE(0,2,1,0));
 +    t5   = _mm_shuffle_ps(tA,tA,_MM_SHUFFLE(1,0,2,1));
 +    t6   = _mm_shuffle_ps(tA,tA,_MM_SHUFFLE(2,1,0,2));
-     
++
 +    t1   = _mm_add_ps(t1,t4);
 +    t2   = _mm_add_ps(t2,t5);
 +    t3   = _mm_add_ss(t3,t6);
-                                          const float * gmx_restrict xyz,
-                                          __m128 * gmx_restrict x1, __m128 * gmx_restrict y1, __m128 * gmx_restrict z1,
-                                          __m128 * gmx_restrict x2, __m128 * gmx_restrict y2, __m128 * gmx_restrict z2,
-                                          __m128 * gmx_restrict x3, __m128 * gmx_restrict y3, __m128 * gmx_restrict z3,
-                                          __m128 * gmx_restrict x4, __m128 * gmx_restrict y4, __m128 * gmx_restrict z4)
++
 +    *x1  = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(0,0,0,0));
 +    *y1  = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(1,1,1,1));
 +    *z1  = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(2,2,2,2));
 +    *x2  = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(3,3,3,3));
 +    *y2  = _mm_shuffle_ps(t2,t2,_MM_SHUFFLE(0,0,0,0));
 +    *z2  = _mm_shuffle_ps(t2,t2,_MM_SHUFFLE(1,1,1,1));
 +    *x3  = _mm_shuffle_ps(t2,t2,_MM_SHUFFLE(2,2,2,2));
 +    *y3  = _mm_shuffle_ps(t2,t2,_MM_SHUFFLE(3,3,3,3));
 +    *z3  = _mm_shuffle_ps(t3,t3,_MM_SHUFFLE(0,0,0,0));
 +}
 +
 +
 +static gmx_inline void
 +gmx_mm_load_shift_and_4rvec_broadcast_ps(const float * gmx_restrict xyz_shift,
-     
++        const float * gmx_restrict xyz,
++        __m128 * gmx_restrict x1, __m128 * gmx_restrict y1, __m128 * gmx_restrict z1,
++        __m128 * gmx_restrict x2, __m128 * gmx_restrict y2, __m128 * gmx_restrict z2,
++        __m128 * gmx_restrict x3, __m128 * gmx_restrict y3, __m128 * gmx_restrict z3,
++        __m128 * gmx_restrict x4, __m128 * gmx_restrict y4, __m128 * gmx_restrict z4)
 +{
 +    __m128 tA,tB;
 +    __m128 t1,t2,t3,t4,t5,t6;
-     
++
 +    tA   = _mm_castpd_ps(_mm_load_sd((const double *)xyz_shift));
 +    tB   = _mm_load_ss(xyz_shift+2);
-     
++
 +    t1   = _mm_loadu_ps(xyz);
 +    t2   = _mm_loadu_ps(xyz+4);
 +    t3   = _mm_loadu_ps(xyz+8);
-     
++
 +    tA   = _mm_movelh_ps(tA,tB);
 +    t4   = _mm_shuffle_ps(tA,tA,_MM_SHUFFLE(0,2,1,0));
 +    t5   = _mm_shuffle_ps(tA,tA,_MM_SHUFFLE(1,0,2,1));
 +    t6   = _mm_shuffle_ps(tA,tA,_MM_SHUFFLE(2,1,0,2));
-     
++
 +    t1   = _mm_add_ps(t1,t4);
 +    t2   = _mm_add_ps(t2,t5);
 +    t3   = _mm_add_ps(t3,t6);
-                                   __m128 * gmx_restrict x3, __m128 * gmx_restrict y3, __m128 * gmx_restrict z3) 
++
 +    *x1  = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(0,0,0,0));
 +    *y1  = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(1,1,1,1));
 +    *z1  = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(2,2,2,2));
 +    *x2  = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(3,3,3,3));
 +    *y2  = _mm_shuffle_ps(t2,t2,_MM_SHUFFLE(0,0,0,0));
 +    *z2  = _mm_shuffle_ps(t2,t2,_MM_SHUFFLE(1,1,1,1));
 +    *x3  = _mm_shuffle_ps(t2,t2,_MM_SHUFFLE(2,2,2,2));
 +    *y3  = _mm_shuffle_ps(t2,t2,_MM_SHUFFLE(3,3,3,3));
 +    *z3  = _mm_shuffle_ps(t3,t3,_MM_SHUFFLE(0,0,0,0));
 +    *x4  = _mm_shuffle_ps(t3,t3,_MM_SHUFFLE(1,1,1,1));
 +    *y4  = _mm_shuffle_ps(t3,t3,_MM_SHUFFLE(2,2,2,2));
 +    *z4  = _mm_shuffle_ps(t3,t3,_MM_SHUFFLE(3,3,3,3));
 +}
 +
 +
 +static gmx_inline void
 +gmx_mm_load_1rvec_4ptr_swizzle_ps(const float * gmx_restrict ptrA,
 +                                  const float * gmx_restrict ptrB,
 +                                  const float * gmx_restrict ptrC,
 +                                  const float * gmx_restrict ptrD,
 +                                  __m128 *      gmx_restrict x1,
 +                                  __m128 *      gmx_restrict y1,
 +                                  __m128 *      gmx_restrict z1)
 +{
 +    __m128 t1,t2,t3,t4,t5,t6,t7,t8;
 +    t1   = _mm_castpd_ps(_mm_load_sd((const double *)ptrA));
 +    t2   = _mm_castpd_ps(_mm_load_sd((const double *)ptrB));
 +    t3   = _mm_castpd_ps(_mm_load_sd((const double *)ptrC));
 +    t4   = _mm_castpd_ps(_mm_load_sd((const double *)ptrD));
 +    t5 = _mm_load_ss(ptrA+2);
 +    t6 = _mm_load_ss(ptrB+2);
 +    t7 = _mm_load_ss(ptrC+2);
 +    t8 = _mm_load_ss(ptrD+2);
 +    t1 = _mm_unpacklo_ps(t1,t2);
 +    t3 = _mm_unpacklo_ps(t3,t4);
 +    *x1 = _mm_movelh_ps(t1,t3);
 +    *y1 = _mm_movehl_ps(t3,t1);
 +    t5  = _mm_unpacklo_ps(t5,t6);
 +    t7  = _mm_unpacklo_ps(t7,t8);
 +    *z1 = _mm_movelh_ps(t5,t7);
 +}
 +
 +
 +static gmx_inline void
 +gmx_mm_load_3rvec_4ptr_swizzle_ps(const float * gmx_restrict ptrA,
 +                                  const float * gmx_restrict ptrB,
 +                                  const float * gmx_restrict ptrC,
 +                                  const float * gmx_restrict ptrD,
 +                                  __m128 * gmx_restrict x1, __m128 * gmx_restrict y1, __m128 * gmx_restrict z1,
 +                                  __m128 * gmx_restrict x2, __m128 * gmx_restrict y2, __m128 * gmx_restrict z2,
-                                   __m128 * gmx_restrict x4, __m128 * gmx_restrict y4, __m128 * gmx_restrict z4) 
++                                  __m128 * gmx_restrict x3, __m128 * gmx_restrict y3, __m128 * gmx_restrict z3)
 +{
 +    __m128 t1,t2,t3,t4;
 +    t1            = gmx_mm_castsi128_ps( _mm_lddqu_si128( (void *)ptrA ) );
 +    t2            = gmx_mm_castsi128_ps( _mm_lddqu_si128( (void *)ptrB ) );
 +    t3            = gmx_mm_castsi128_ps( _mm_lddqu_si128( (void *)ptrC ) );
 +    t4            = gmx_mm_castsi128_ps( _mm_lddqu_si128( (void *)ptrD ) );
 +    _MM_TRANSPOSE4_PS(t1,t2,t3,t4);
 +    *x1           = t1;
 +    *y1           = t2;
 +    *z1           = t3;
 +    *x2           = t4;
 +    t1            = gmx_mm_castsi128_ps( _mm_lddqu_si128( (void *)(ptrA+4) ) );
 +    t2            = gmx_mm_castsi128_ps( _mm_lddqu_si128( (void *)(ptrB+4) ) );
 +    t3            = gmx_mm_castsi128_ps( _mm_lddqu_si128( (void *)(ptrC+4) ) );
 +    t4            = gmx_mm_castsi128_ps( _mm_lddqu_si128( (void *)(ptrD+4) ) );
 +    _MM_TRANSPOSE4_PS(t1,t2,t3,t4);
 +    *y2           = t1;
 +    *z2           = t2;
 +    *x3           = t3;
 +    *y3           = t4;
 +    t1            = _mm_load_ss(ptrA+8);
 +    t2            = _mm_load_ss(ptrB+8);
 +    t3            = _mm_load_ss(ptrC+8);
 +    t4            = _mm_load_ss(ptrD+8);
 +    t1            = _mm_unpacklo_ps(t1,t3);
 +    t3            = _mm_unpacklo_ps(t2,t4);
 +    *z3           = _mm_unpacklo_ps(t1,t3);
 +}
 +
 +
 +static gmx_inline void
 +gmx_mm_load_4rvec_4ptr_swizzle_ps(const float * gmx_restrict ptrA,
 +                                  const float * gmx_restrict ptrB,
 +                                  const float * gmx_restrict ptrC,
 +                                  const float * gmx_restrict ptrD,
 +                                  __m128 * gmx_restrict x1, __m128 * gmx_restrict y1, __m128 * gmx_restrict z1,
 +                                  __m128 * gmx_restrict x2, __m128 * gmx_restrict y2, __m128 * gmx_restrict z2,
 +                                  __m128 * gmx_restrict x3, __m128 * gmx_restrict y3, __m128 * gmx_restrict z3,
-                                        __m128 x3, __m128 y3, __m128 z3) 
++                                  __m128 * gmx_restrict x4, __m128 * gmx_restrict y4, __m128 * gmx_restrict z4)
 +{
 +    __m128 t1,t2,t3,t4;
 +    t1            = gmx_mm_castsi128_ps( _mm_lddqu_si128( (void *)(ptrA) ) );
 +    t2            = gmx_mm_castsi128_ps( _mm_lddqu_si128( (void *)(ptrB) ) );
 +    t3            = gmx_mm_castsi128_ps( _mm_lddqu_si128( (void *)(ptrC) ) );
 +    t4            = gmx_mm_castsi128_ps( _mm_lddqu_si128( (void *)(ptrD) ) );
 +    _MM_TRANSPOSE4_PS(t1,t2,t3,t4);
 +    *x1           = t1;
 +    *y1           = t2;
 +    *z1           = t3;
 +    *x2           = t4;
 +    t1            = gmx_mm_castsi128_ps( _mm_lddqu_si128( (void *)(ptrA+4) ) );
 +    t2            = gmx_mm_castsi128_ps( _mm_lddqu_si128( (void *)(ptrB+4) ) );
 +    t3            = gmx_mm_castsi128_ps( _mm_lddqu_si128( (void *)(ptrC+4) ) );
 +    t4            = gmx_mm_castsi128_ps( _mm_lddqu_si128( (void *)(ptrD+4) ) );
 +    _MM_TRANSPOSE4_PS(t1,t2,t3,t4);
 +    *y2           = t1;
 +    *z2           = t2;
 +    *x3           = t3;
 +    *y3           = t4;
 +    t1            = gmx_mm_castsi128_ps( _mm_lddqu_si128( (void *)(ptrA+8) ) );
 +    t2            = gmx_mm_castsi128_ps( _mm_lddqu_si128( (void *)(ptrB+8) ) );
 +    t3            = gmx_mm_castsi128_ps( _mm_lddqu_si128( (void *)(ptrC+8) ) );
 +    t4            = gmx_mm_castsi128_ps( _mm_lddqu_si128( (void *)(ptrD+8) ) );
 +    _MM_TRANSPOSE4_PS(t1,t2,t3,t4);
 +    *z3           = t1;
 +    *x4           = t2;
 +    *y4           = t3;
 +    *z4           = t4;
 +}
 +
 +
 +
 +static gmx_inline void
 +gmx_mm_decrement_1rvec_4ptr_swizzle_ps(float * ptrA,
 +                                       float * ptrB,
 +                                       float * ptrC,
 +                                       float * ptrD,
 +                                       __m128 x1, __m128 y1, __m128 z1)
 +{
 +    __m128 t1,t2,t3,t4,t5,t6,t7,t8,t9,t10,t11,t12;
 +    t5          = _mm_unpacklo_ps(y1,z1);
 +    t6          = _mm_unpackhi_ps(y1,z1);
 +    t7          = _mm_shuffle_ps(x1,t5,_MM_SHUFFLE(1,0,0,0));
 +    t8          = _mm_shuffle_ps(x1,t5,_MM_SHUFFLE(3,2,0,1));
 +    t9          = _mm_shuffle_ps(x1,t6,_MM_SHUFFLE(1,0,0,2));
 +    t10         = _mm_shuffle_ps(x1,t6,_MM_SHUFFLE(3,2,0,3));
 +    t1          = _mm_load_ss(ptrA);
 +    t1          = _mm_loadh_pi(t1,(__m64 *)(ptrA+1));
 +    t1          = _mm_sub_ps(t1,t7);
 +    _mm_store_ss(ptrA,t1);
 +    _mm_storeh_pi((__m64 *)(ptrA+1),t1);
 +    t2          = _mm_load_ss(ptrB);
 +    t2          = _mm_loadh_pi(t2,(__m64 *)(ptrB+1));
 +    t2          = _mm_sub_ps(t2,t8);
 +    _mm_store_ss(ptrB,t2);
 +    _mm_storeh_pi((__m64 *)(ptrB+1),t2);
 +    t3          = _mm_load_ss(ptrC);
 +    t3          = _mm_loadh_pi(t3,(__m64 *)(ptrC+1));
 +    t3          = _mm_sub_ps(t3,t9);
 +    _mm_store_ss(ptrC,t3);
 +    _mm_storeh_pi((__m64 *)(ptrC+1),t3);
 +    t4          = _mm_load_ss(ptrD);
 +    t4          = _mm_loadh_pi(t4,(__m64 *)(ptrD+1));
 +    t4          = _mm_sub_ps(t4,t10);
 +    _mm_store_ss(ptrD,t4);
 +    _mm_storeh_pi((__m64 *)(ptrD+1),t4);
 +}
 +
 +
 +
++#if defined (_MSC_VER) && defined(_M_IX86)
++/* Macro work-around since 32-bit MSVC cannot handle >3 xmm/ymm parameters */
++#define gmx_mm_decrement_3rvec_4ptr_swizzle_ps(ptrA,ptrB,ptrC,ptrD, \
++_x1,_y1,_z1,_x2,_y2,_z2,_x3,_y3,_z3) \
++{\
++__m128 _t1,_t2,_t3,_t4,_t5,_t6,_t7,_t8,_t9,_t10;\
++__m128 _t11,_t12,_t13,_t14,_t15,_t16,_t17,_t18,_t19;\
++__m128 _t20,_t21,_t22,_t23,_t24,_t25;\
++_t13         = _mm_unpackhi_ps(_x1,_y1);\
++_x1          = _mm_unpacklo_ps(_x1,_y1);\
++_t14         = _mm_unpackhi_ps(_z1,_x2);\
++_z1          = _mm_unpacklo_ps(_z1,_x2);\
++_t15         = _mm_unpackhi_ps(_y2,_z2);\
++_y2          = _mm_unpacklo_ps(_y2,_z2);\
++_t16         = _mm_unpackhi_ps(_x3,_y3);\
++_x3          = _mm_unpacklo_ps(_x3,_y3);\
++_t17         = _mm_shuffle_ps(_z3,_z3,_MM_SHUFFLE(0,0,0,1));\
++_t18         = _mm_movehl_ps(_z3,_z3);\
++_t19         = _mm_shuffle_ps(_t18,_t18,_MM_SHUFFLE(0,0,0,1));\
++_t20         = _mm_movelh_ps(_x1,_z1);\
++_t21         = _mm_movehl_ps(_z1,_x1);\
++_t22         = _mm_movelh_ps(_t13,_t14);\
++_t14         = _mm_movehl_ps(_t14,_t13);\
++_t23         = _mm_movelh_ps(_y2,_x3);\
++_t24         = _mm_movehl_ps(_x3,_y2);\
++_t25         = _mm_movelh_ps(_t15,_t16);\
++_t16         = _mm_movehl_ps(_t16,_t15);\
++_t1          = _mm_loadu_ps(ptrA);\
++_t2          = _mm_loadu_ps(ptrA+4);\
++_t3          = _mm_load_ss(ptrA+8);\
++_t1          = _mm_sub_ps(_t1,_t20);\
++_t2          = _mm_sub_ps(_t2,_t23);\
++_t3          = _mm_sub_ss(_t3,_z3);\
++_mm_storeu_ps(ptrA,_t1);\
++_mm_storeu_ps(ptrA+4,_t2);\
++_mm_store_ss(ptrA+8,_t3);\
++_t4          = _mm_loadu_ps(ptrB);\
++_t5          = _mm_loadu_ps(ptrB+4);\
++_t6          = _mm_load_ss(ptrB+8);\
++_t4          = _mm_sub_ps(_t4,_t21);\
++_t5          = _mm_sub_ps(_t5,_t24);\
++_t6          = _mm_sub_ss(_t6,_t17);\
++_mm_storeu_ps(ptrB,_t4);\
++_mm_storeu_ps(ptrB+4,_t5);\
++_mm_store_ss(ptrB+8,_t6);\
++_t7          = _mm_loadu_ps(ptrC);\
++_t8          = _mm_loadu_ps(ptrC+4);\
++_t9          = _mm_load_ss(ptrC+8);\
++_t7          = _mm_sub_ps(_t7,_t22);\
++_t8          = _mm_sub_ps(_t8,_t25);\
++_t9          = _mm_sub_ss(_t9,_t18);\
++_mm_storeu_ps(ptrC,_t7);\
++_mm_storeu_ps(ptrC+4,_t8);\
++_mm_store_ss(ptrC+8,_t9);\
++_t10         = _mm_loadu_ps(ptrD);\
++_t11         = _mm_loadu_ps(ptrD+4);\
++_t12         = _mm_load_ss(ptrD+8);\
++_t10         = _mm_sub_ps(_t10,_t14);\
++_t11         = _mm_sub_ps(_t11,_t16);\
++_t12         = _mm_sub_ss(_t12,_t19);\
++_mm_storeu_ps(ptrD,_t10);\
++_mm_storeu_ps(ptrD+4,_t11);\
++_mm_store_ss(ptrD+8,_t12);\
++}
++#else
++/* Real function for sane compilers */
 +static gmx_inline void
 +gmx_mm_decrement_3rvec_4ptr_swizzle_ps(float * gmx_restrict ptrA, float * gmx_restrict ptrB,
 +                                       float * gmx_restrict ptrC, float * gmx_restrict ptrD,
 +                                       __m128 x1, __m128 y1, __m128 z1,
 +                                       __m128 x2, __m128 y2, __m128 z2,
-     
++                                       __m128 x3, __m128 y3, __m128 z3)
 +{
 +    __m128 t1,t2,t3,t4,t5,t6,t7,t8,t9,t10;
 +    __m128 t11,t12,t13,t14,t15,t16,t17,t18,t19;
 +    __m128 t20,t21,t22,t23,t24,t25;
 +
 +    t13         = _mm_unpackhi_ps(x1,y1);
 +    x1          = _mm_unpacklo_ps(x1,y1);
 +    t14         = _mm_unpackhi_ps(z1,x2);
 +    z1          = _mm_unpacklo_ps(z1,x2);
 +    t15         = _mm_unpackhi_ps(y2,z2);
 +    y2          = _mm_unpacklo_ps(y2,z2);
 +    t16         = _mm_unpackhi_ps(x3,y3);
 +    x3          = _mm_unpacklo_ps(x3,y3);
 +    t17         = _mm_shuffle_ps(z3,z3,_MM_SHUFFLE(0,0,0,1));
 +    t18         = _mm_movehl_ps(z3,z3);
 +    t19         = _mm_shuffle_ps(t18,t18,_MM_SHUFFLE(0,0,0,1));
 +    t20         = _mm_movelh_ps(x1,z1);
 +    t21         = _mm_movehl_ps(z1,x1);
 +    t22         = _mm_movelh_ps(t13,t14);
 +    t14         = _mm_movehl_ps(t14,t13);
 +    t23         = _mm_movelh_ps(y2,x3);
 +    t24         = _mm_movehl_ps(x3,y2);
 +    t25         = _mm_movelh_ps(t15,t16);
 +    t16         = _mm_movehl_ps(t16,t15);
 +    t1          = _mm_loadu_ps(ptrA);
 +    t2          = _mm_loadu_ps(ptrA+4);
 +    t3          = _mm_load_ss(ptrA+8);
 +    t4          = _mm_loadu_ps(ptrB);
 +    t5          = _mm_loadu_ps(ptrB+4);
 +    t6          = _mm_load_ss(ptrB+8);
 +    t7          = _mm_loadu_ps(ptrC);
 +    t8          = _mm_loadu_ps(ptrC+4);
 +    t9          = _mm_load_ss(ptrC+8);
 +    t10         = _mm_loadu_ps(ptrD);
 +    t11         = _mm_loadu_ps(ptrD+4);
 +    t12         = _mm_load_ss(ptrD+8);
++
 +    t1          = _mm_sub_ps(t1,t20);
 +    t2          = _mm_sub_ps(t2,t23);
 +    t3          = _mm_sub_ss(t3,z3);
 +    _mm_storeu_ps(ptrA,t1);
 +    _mm_storeu_ps(ptrA+4,t2);
 +    _mm_store_ss(ptrA+8,t3);
 +    t4          = _mm_sub_ps(t4,t21);
 +    t5          = _mm_sub_ps(t5,t24);
 +    t6          = _mm_sub_ss(t6,t17);
 +    _mm_storeu_ps(ptrB,t4);
 +    _mm_storeu_ps(ptrB+4,t5);
 +    _mm_store_ss(ptrB+8,t6);
 +    t7          = _mm_sub_ps(t7,t22);
 +    t8          = _mm_sub_ps(t8,t25);
 +    t9          = _mm_sub_ss(t9,t18);
 +    _mm_storeu_ps(ptrC,t7);
 +    _mm_storeu_ps(ptrC+4,t8);
 +    _mm_store_ss(ptrC+8,t9);
 +    t10         = _mm_sub_ps(t10,t14);
 +    t11         = _mm_sub_ps(t11,t16);
 +    t12         = _mm_sub_ss(t12,t19);
 +    _mm_storeu_ps(ptrD,t10);
 +    _mm_storeu_ps(ptrD+4,t11);
 +    _mm_store_ss(ptrD+8,t12);
 +}
-                                        __m128 x4, __m128 y4, __m128 z4) 
++#endif
++
++#if defined (_MSC_VER) && defined(_M_IX86)
++/* Macro work-around since 32-bit MSVC cannot handle >3 xmm/ymm parameters */
++#define gmx_mm_decrement_4rvec_4ptr_swizzle_ps(ptrA,ptrB,ptrC,ptrD, \
++_x1,_y1,_z1,_x2,_y2,_z2,_x3,_y3,_z3,_x4,_y4,_z4) \
++{\
++__m128 _t1,_t2,_t3,_t4,_t5,_t6,_t7,_t8,_t9,_t10,_t11;\
++__m128 _t12,_t13,_t14,_t15,_t16,_t17,_t18,_t19,_t20,_t21,_t22;\
++__m128 _t23,_t24;\
++_t13         = _mm_unpackhi_ps(_x1,_y1);\
++_x1          = _mm_unpacklo_ps(_x1,_y1);\
++_t14         = _mm_unpackhi_ps(_z1,_x2);\
++_z1          = _mm_unpacklo_ps(_z1,_x2);\
++_t15         = _mm_unpackhi_ps(_y2,_z2);\
++_y2          = _mm_unpacklo_ps(_y2,_z2);\
++_t16         = _mm_unpackhi_ps(_x3,_y3);\
++_x3          = _mm_unpacklo_ps(_x3,_y3);\
++_t17         = _mm_unpackhi_ps(_z3,_x4);\
++_z3          = _mm_unpacklo_ps(_z3,_x4);\
++_t18         = _mm_unpackhi_ps(_y4,_z4);\
++_y4          = _mm_unpacklo_ps(_y4,_z4);\
++_t19         = _mm_movelh_ps(_x1,_z1);\
++_z1          = _mm_movehl_ps(_z1,_x1);\
++_t20         = _mm_movelh_ps(_t13,_t14);\
++_t14         = _mm_movehl_ps(_t14,_t13);\
++_t21         = _mm_movelh_ps(_y2,_x3);\
++_x3          = _mm_movehl_ps(_x3,_y2);\
++_t22         = _mm_movelh_ps(_t15,_t16);\
++_t16         = _mm_movehl_ps(_t16,_t15);\
++_t23         = _mm_movelh_ps(_z3,_y4);\
++_y4          = _mm_movehl_ps(_y4,_z3);\
++_t24         = _mm_movelh_ps(_t17,_t18);\
++_t18         = _mm_movehl_ps(_t18,_t17);\
++_t1          = _mm_loadu_ps(ptrA);\
++_t2          = _mm_loadu_ps(ptrA+4);\
++_t3          = _mm_loadu_ps(ptrA+8);\
++_t1          = _mm_sub_ps(_t1,_t19);\
++_t2          = _mm_sub_ps(_t2,_t21);\
++_t3          = _mm_sub_ps(_t3,_t23);\
++_mm_storeu_ps(ptrA,_t1);\
++_mm_storeu_ps(ptrA+4,_t2);\
++_mm_storeu_ps(ptrA+8,_t3);\
++_t4          = _mm_loadu_ps(ptrB);\
++_t5          = _mm_loadu_ps(ptrB+4);\
++_t6          = _mm_loadu_ps(ptrB+8);\
++_t4          = _mm_sub_ps(_t4,_z1);\
++_t5          = _mm_sub_ps(_t5,_x3);\
++_t6          = _mm_sub_ps(_t6,_y4);\
++_mm_storeu_ps(ptrB,_t4);\
++_mm_storeu_ps(ptrB+4,_t5);\
++_mm_storeu_ps(ptrB+8,_t6);\
++_t7          = _mm_loadu_ps(ptrC);\
++_t8          = _mm_loadu_ps(ptrC+4);\
++_t9          = _mm_loadu_ps(ptrC+8);\
++_t7          = _mm_sub_ps(_t7,_t20);\
++_t8          = _mm_sub_ps(_t8,_t22);\
++_t9          = _mm_sub_ps(_t9,_t24);\
++_mm_storeu_ps(ptrC,_t7);\
++_mm_storeu_ps(ptrC+4,_t8);\
++_mm_storeu_ps(ptrC+8,_t9);\
++_t10         = _mm_loadu_ps(ptrD);\
++_t11         = _mm_loadu_ps(ptrD+4);\
++_t12         = _mm_loadu_ps(ptrD+8);\
++_t10         = _mm_sub_ps(_t10,_t14);\
++_t11         = _mm_sub_ps(_t11,_t16);\
++_t12         = _mm_sub_ps(_t12,_t18);\
++_mm_storeu_ps(ptrD,_t10);\
++_mm_storeu_ps(ptrD+4,_t11);\
++_mm_storeu_ps(ptrD+8,_t12);\
++}
++#else
++/* Real function for sane compilers */
 +static gmx_inline void
 +gmx_mm_decrement_4rvec_4ptr_swizzle_ps(float * gmx_restrict ptrA, float * gmx_restrict ptrB,
 +                                       float * gmx_restrict ptrC, float * gmx_restrict ptrD,
 +                                       __m128 x1, __m128 y1, __m128 z1,
 +                                       __m128 x2, __m128 y2, __m128 z2,
 +                                       __m128 x3, __m128 y3, __m128 z3,
++                                       __m128 x4, __m128 y4, __m128 z4)
 +{
 +    __m128 t1,t2,t3,t4,t5,t6,t7,t8,t9,t10,t11;
 +    __m128 t12,t13,t14,t15,t16,t17,t18,t19,t20,t21,t22;
 +    __m128 t23,t24;
 +    t13         = _mm_unpackhi_ps(x1,y1);
 +    x1          = _mm_unpacklo_ps(x1,y1);
 +    t14         = _mm_unpackhi_ps(z1,x2);
 +    z1          = _mm_unpacklo_ps(z1,x2);
 +    t15         = _mm_unpackhi_ps(y2,z2);
 +    y2          = _mm_unpacklo_ps(y2,z2);
 +    t16         = _mm_unpackhi_ps(x3,y3);
 +    x3          = _mm_unpacklo_ps(x3,y3);
 +    t17         = _mm_unpackhi_ps(z3,x4);
 +    z3          = _mm_unpacklo_ps(z3,x4);
 +    t18         = _mm_unpackhi_ps(y4,z4);
 +    y4          = _mm_unpacklo_ps(y4,z4);
 +    t19         = _mm_movelh_ps(x1,z1);
 +    z1          = _mm_movehl_ps(z1,x1);
 +    t20         = _mm_movelh_ps(t13,t14);
 +    t14         = _mm_movehl_ps(t14,t13);
 +    t21         = _mm_movelh_ps(y2,x3);
 +    x3          = _mm_movehl_ps(x3,y2);
 +    t22         = _mm_movelh_ps(t15,t16);
 +    t16         = _mm_movehl_ps(t16,t15);
 +    t23         = _mm_movelh_ps(z3,y4);
 +    y4          = _mm_movehl_ps(y4,z3);
 +    t24         = _mm_movelh_ps(t17,t18);
 +    t18         = _mm_movehl_ps(t18,t17);
 +    t1          = _mm_loadu_ps(ptrA);
 +    t2          = _mm_loadu_ps(ptrA+4);
 +    t3          = _mm_loadu_ps(ptrA+8);
 +    t1          = _mm_sub_ps(t1,t19);
 +    t2          = _mm_sub_ps(t2,t21);
 +    t3          = _mm_sub_ps(t3,t23);
 +    _mm_storeu_ps(ptrA,t1);
 +    _mm_storeu_ps(ptrA+4,t2);
 +    _mm_storeu_ps(ptrA+8,t3);
 +    t4          = _mm_loadu_ps(ptrB);
 +    t5          = _mm_loadu_ps(ptrB+4);
 +    t6          = _mm_loadu_ps(ptrB+8);
 +    t4          = _mm_sub_ps(t4,z1);
 +    t5          = _mm_sub_ps(t5,x3);
 +    t6          = _mm_sub_ps(t6,y4);
 +    _mm_storeu_ps(ptrB,t4);
 +    _mm_storeu_ps(ptrB+4,t5);
 +    _mm_storeu_ps(ptrB+8,t6);
 +    t7          = _mm_loadu_ps(ptrC);
 +    t8          = _mm_loadu_ps(ptrC+4);
 +    t9          = _mm_loadu_ps(ptrC+8);
 +    t7          = _mm_sub_ps(t7,t20);
 +    t8          = _mm_sub_ps(t8,t22);
 +    t9          = _mm_sub_ps(t9,t24);
 +    _mm_storeu_ps(ptrC,t7);
 +    _mm_storeu_ps(ptrC+4,t8);
 +    _mm_storeu_ps(ptrC+8,t9);
 +    t10         = _mm_loadu_ps(ptrD);
 +    t11         = _mm_loadu_ps(ptrD+4);
 +    t12         = _mm_loadu_ps(ptrD+8);
 +    t10         = _mm_sub_ps(t10,t14);
 +    t11         = _mm_sub_ps(t11,t16);
 +    t12         = _mm_sub_ps(t12,t18);
 +    _mm_storeu_ps(ptrD,t10);
 +    _mm_storeu_ps(ptrD+4,t11);
 +    _mm_storeu_ps(ptrD+8,t12);
 +}
-       __m128 t2,t3;
-       
++#endif
 +
 +
 +static gmx_inline void
 +gmx_mm_update_iforce_1atom_swizzle_ps(__m128 fix1, __m128 fiy1, __m128 fiz1,
 +                                      float * gmx_restrict fptr,
 +                                      float * gmx_restrict fshiftptr)
 +{
-       fiy1 = _mm_hadd_ps(fiy1,fiz1);
-       
-       fix1 = _mm_hadd_ps(fix1,fiy1); /* fiz1 fiy1 fix1 fix1 */
-     
-       t2 = _mm_load_ss(fptr);
-       t2 = _mm_loadh_pi(t2,(__m64 *)(fptr+1));
-       t3 = _mm_load_ss(fshiftptr);
-       t3 = _mm_loadh_pi(t3,(__m64 *)(fshiftptr+1));
-       
-       t2 = _mm_add_ps(t2,fix1);
-       t3 = _mm_add_ps(t3,fix1);
-       
-       _mm_store_ss(fptr,t2);
-       _mm_storeh_pi((__m64 *)(fptr+1),t2);
-       _mm_store_ss(fshiftptr,t3);
-       _mm_storeh_pi((__m64 *)(fshiftptr+1),t3);
++    __m128 t2,t3;
++
 +    fix1 = _mm_hadd_ps(fix1,fix1);
-       __m128 t1,t2,t3,t4;
-       
-       fix1 = _mm_hadd_ps(fix1,fiy1);
-       fiz1 = _mm_hadd_ps(fiz1,fix2);
-       fiy2 = _mm_hadd_ps(fiy2,fiz2);
-       fix3 = _mm_hadd_ps(fix3,fiy3);
-       fiz3 = _mm_hadd_ps(fiz3,fiz3);
-       
-       fix1 = _mm_hadd_ps(fix1,fiz1); /* fix2 fiz1 fiy1 fix1 */
-       fiy2 = _mm_hadd_ps(fiy2,fix3); /* fiy3 fix3 fiz2 fiy2 */
-       fiz3 = _mm_hadd_ps(fiz3,fiz3); /*  -    -    -   fiz3 */
-     
-       _mm_storeu_ps(fptr,  _mm_add_ps(fix1,_mm_loadu_ps(fptr)  ));
-       _mm_storeu_ps(fptr+4,_mm_add_ps(fiy2,_mm_loadu_ps(fptr+4)));
-       _mm_store_ss (fptr+8,_mm_add_ss(fiz3,_mm_load_ss(fptr+8) ));
-       
-       t4 = _mm_load_ss(fshiftptr+2);
-       t4 = _mm_loadh_pi(t4,(__m64 *)(fshiftptr));
-       
-       t1 = _mm_shuffle_ps(fiz3,fix1,_MM_SHUFFLE(1,0,0,0));   /* fiy1 fix1  -   fiz3 */
-       t2 = _mm_shuffle_ps(fix1,fiy2,_MM_SHUFFLE(3,2,2,2));   /* fiy3 fix3  -   fiz1 */
-       t3 = _mm_shuffle_ps(fiy2,fix1,_MM_SHUFFLE(3,3,0,1));   /* fix2 fix2 fiy2 fiz2 */
-       t3 = _mm_shuffle_ps(t3  ,t3  ,_MM_SHUFFLE(1,2,0,0));   /* fiy2 fix2  -   fiz2 */
-     
-       t1 = _mm_add_ps(t1,t2);
-       t3 = _mm_add_ps(t3,t4);
-       t1 = _mm_add_ps(t1,t3); /* y x - z */
-       
-       _mm_store_ss(fshiftptr+2,t1);
-       _mm_storeh_pi((__m64 *)(fshiftptr),t1);
- }
++    fiy1 = _mm_hadd_ps(fiy1,fiz1);
++
++    fix1 = _mm_hadd_ps(fix1,fiy1); /* fiz1 fiy1 fix1 fix1 */
++
++    t2 = _mm_load_ss(fptr);
++    t2 = _mm_loadh_pi(t2,(__m64 *)(fptr+1));
++    t3 = _mm_load_ss(fshiftptr);
++    t3 = _mm_loadh_pi(t3,(__m64 *)(fshiftptr+1));
++
++    t2 = _mm_add_ps(t2,fix1);
++    t3 = _mm_add_ps(t3,fix1);
++
++    _mm_store_ss(fptr,t2);
++    _mm_storeh_pi((__m64 *)(fptr+1),t2);
++    _mm_store_ss(fshiftptr,t3);
++    _mm_storeh_pi((__m64 *)(fshiftptr+1),t3);
 +}
 +
++#if defined (_MSC_VER) && defined(_M_IX86)
++/* Macro work-around since 32-bit MSVC cannot handle >3 xmm/ymm parameters */
++#define gmx_mm_update_iforce_3atom_swizzle_ps(fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3, \
++fptr,fshiftptr) \
++{\
++__m128 _t1,_t2,_t3,_t4;\
++\
++fix1 = _mm_hadd_ps(fix1,fiy1);\
++fiz1 = _mm_hadd_ps(fiz1,fix2);\
++fiy2 = _mm_hadd_ps(fiy2,fiz2);\
++fix3 = _mm_hadd_ps(fix3,fiy3);\
++fiz3 = _mm_hadd_ps(fiz3,fiz3);\
++fix1 = _mm_hadd_ps(fix1,fiz1);\
++fiy2 = _mm_hadd_ps(fiy2,fix3);\
++fiz3 = _mm_hadd_ps(fiz3,fiz3);\
++_mm_storeu_ps(fptr,  _mm_add_ps(fix1,_mm_loadu_ps(fptr)  ));\
++_mm_storeu_ps(fptr+4,_mm_add_ps(fiy2,_mm_loadu_ps(fptr+4)));\
++_mm_store_ss (fptr+8,_mm_add_ss(fiz3,_mm_load_ss(fptr+8) ));\
++_t4 = _mm_load_ss(fshiftptr+2);\
++_t4 = _mm_loadh_pi(_t4,(__m64 *)(fshiftptr));\
++_t1 = _mm_shuffle_ps(fiz3,fix1,_MM_SHUFFLE(1,0,0,0));\
++_t2 = _mm_shuffle_ps(fix1,fiy2,_MM_SHUFFLE(3,2,2,2));\
++_t3 = _mm_shuffle_ps(fiy2,fix1,_MM_SHUFFLE(3,3,0,1));\
++_t3 = _mm_shuffle_ps(_t3,_t3,_MM_SHUFFLE(1,2,0,0));\
++_t1 = _mm_add_ps(_t1,_t2);\
++_t3 = _mm_add_ps(_t3,_t4);\
++_t1 = _mm_add_ps(_t1,_t3);\
++_mm_store_ss(fshiftptr+2,_t1);\
++_mm_storeh_pi((__m64 *)(fshiftptr),_t1);\
++}
++#else
++/* Real function for sane compilers */
 +static gmx_inline void
 +gmx_mm_update_iforce_3atom_swizzle_ps(__m128 fix1, __m128 fiy1, __m128 fiz1,
 +                                      __m128 fix2, __m128 fiy2, __m128 fiz2,
 +                                      __m128 fix3, __m128 fiy3, __m128 fiz3,
 +                                      float * gmx_restrict fptr,
 +                                      float * gmx_restrict fshiftptr)
 +{
-       __m128 t1,t2,t3,t4,t5;
-       
-       fix1 = _mm_hadd_ps(fix1,fiy1);
-       fiz1 = _mm_hadd_ps(fiz1,fix2);
-       fiy2 = _mm_hadd_ps(fiy2,fiz2);
-       fix3 = _mm_hadd_ps(fix3,fiy3);
-       fiz3 = _mm_hadd_ps(fiz3,fix4);
-       fiy4 = _mm_hadd_ps(fiy4,fiz4);
-       
-       fix1 = _mm_hadd_ps(fix1,fiz1); /* fix2 fiz1 fiy1 fix1 */
-       fiy2 = _mm_hadd_ps(fiy2,fix3); /* fiy3 fix3 fiz2 fiy2 */
-       fiz3 = _mm_hadd_ps(fiz3,fiy4); /* fiz4 fiy4 fix4 fiz3 */
-     
-       _mm_storeu_ps(fptr,  _mm_add_ps(fix1,_mm_loadu_ps(fptr)  ));
-       _mm_storeu_ps(fptr+4,_mm_add_ps(fiy2,_mm_loadu_ps(fptr+4)));
-       _mm_storeu_ps(fptr+8,_mm_add_ps(fiz3,_mm_loadu_ps(fptr+8)));
-       
-       t5 = _mm_load_ss(fshiftptr+2);
-       t5 = _mm_loadh_pi(t5,(__m64 *)(fshiftptr));
-       
-       t1 = _mm_shuffle_ps(fix1,fix1,_MM_SHUFFLE(1,0,2,2));
-       t2 = _mm_shuffle_ps(fiy2,fiy2,_MM_SHUFFLE(3,2,1,1));
-       t3 = _mm_shuffle_ps(fiz3,fiz3,_MM_SHUFFLE(2,1,0,0));
-       t4 = _mm_shuffle_ps(fix1,fiy2,_MM_SHUFFLE(0,0,3,3));
-       t4 = _mm_shuffle_ps(fiz3,t4  ,_MM_SHUFFLE(2,0,3,3));
-       
-       t1 = _mm_add_ps(t1,t2);
-       t3 = _mm_add_ps(t3,t4);
-       t1 = _mm_add_ps(t1,t3);
-       t5 = _mm_add_ps(t5,t1);
-       
-       _mm_store_ss(fshiftptr+2,t5);
-       _mm_storeh_pi((__m64 *)(fshiftptr),t5);
++    __m128 t1,t2,t3,t4;
++
++    fix1 = _mm_hadd_ps(fix1,fiy1);
++    fiz1 = _mm_hadd_ps(fiz1,fix2);
++    fiy2 = _mm_hadd_ps(fiy2,fiz2);
++    fix3 = _mm_hadd_ps(fix3,fiy3);
++    fiz3 = _mm_hadd_ps(fiz3,fiz3);
++
++    fix1 = _mm_hadd_ps(fix1,fiz1); /* fix2 fiz1 fiy1 fix1 */
++    fiy2 = _mm_hadd_ps(fiy2,fix3); /* fiy3 fix3 fiz2 fiy2 */
++    fiz3 = _mm_hadd_ps(fiz3,fiz3); /*  -    -    -   fiz3 */
++
++    _mm_storeu_ps(fptr,  _mm_add_ps(fix1,_mm_loadu_ps(fptr)  ));
++    _mm_storeu_ps(fptr+4,_mm_add_ps(fiy2,_mm_loadu_ps(fptr+4)));
++    _mm_store_ss (fptr+8,_mm_add_ss(fiz3,_mm_load_ss(fptr+8) ));
++
++    t4 = _mm_load_ss(fshiftptr+2);
++    t4 = _mm_loadh_pi(t4,(__m64 *)(fshiftptr));
++
++    t1 = _mm_shuffle_ps(fiz3,fix1,_MM_SHUFFLE(1,0,0,0));   /* fiy1 fix1  -   fiz3 */
++    t2 = _mm_shuffle_ps(fix1,fiy2,_MM_SHUFFLE(3,2,2,2));   /* fiy3 fix3  -   fiz1 */
++    t3 = _mm_shuffle_ps(fiy2,fix1,_MM_SHUFFLE(3,3,0,1));   /* fix2 fix2 fiy2 fiz2 */
++    t3 = _mm_shuffle_ps(t3  ,t3  ,_MM_SHUFFLE(1,2,0,0));   /* fiy2 fix2  -   fiz2 */
 +
++    t1 = _mm_add_ps(t1,t2);
++    t3 = _mm_add_ps(t3,t4);
++    t1 = _mm_add_ps(t1,t3); /* y x - z */
 +
++    _mm_store_ss(fshiftptr+2,t1);
++    _mm_storeh_pi((__m64 *)(fshiftptr),t1);
++}
++#endif
++
++#if defined (_MSC_VER) && defined(_M_IX86)
++/* Macro work-around since 32-bit MSVC cannot handle >3 xmm/ymm parameters */
++#define gmx_mm_update_iforce_4atom_swizzle_ps(fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,fix4,fiy4,fiz4, \
++fptr,fshiftptr) \
++{\
++__m128 _t1,_t2,_t3,_t4,_t5;\
++\
++fix1 = _mm_hadd_ps(fix1,fiy1);\
++fiz1 = _mm_hadd_ps(fiz1,fix2);\
++fiy2 = _mm_hadd_ps(fiy2,fiz2);\
++fix3 = _mm_hadd_ps(fix3,fiy3);\
++fiz3 = _mm_hadd_ps(fiz3,fix4);\
++fiy4 = _mm_hadd_ps(fiy4,fiz4);\
++fix1 = _mm_hadd_ps(fix1,fiz1);\
++fiy2 = _mm_hadd_ps(fiy2,fix3);\
++fiz3 = _mm_hadd_ps(fiz3,fiy4);\
++_mm_storeu_ps(fptr,  _mm_add_ps(fix1,_mm_loadu_ps(fptr)  ));\
++_mm_storeu_ps(fptr+4,_mm_add_ps(fiy2,_mm_loadu_ps(fptr+4)));\
++_mm_storeu_ps(fptr+8,_mm_add_ps(fiz3,_mm_loadu_ps(fptr+8)));\
++_t5 = _mm_load_ss(fshiftptr+2);\
++_t5 = _mm_loadh_pi(_t5,(__m64 *)(fshiftptr));\
++_t1 = _mm_shuffle_ps(fix1,fix1,_MM_SHUFFLE(1,0,2,2));\
++_t2 = _mm_shuffle_ps(fiy2,fiy2,_MM_SHUFFLE(3,2,1,1));\
++_t3 = _mm_shuffle_ps(fiz3,fiz3,_MM_SHUFFLE(2,1,0,0));\
++_t4 = _mm_shuffle_ps(fix1,fiy2,_MM_SHUFFLE(0,0,3,3));\
++_t4 = _mm_shuffle_ps(fiz3,_t4  ,_MM_SHUFFLE(2,0,3,3));\
++_t1 = _mm_add_ps(_t1,_t2);\
++_t3 = _mm_add_ps(_t3,_t4);\
++_t1 = _mm_add_ps(_t1,_t3);\
++_t5 = _mm_add_ps(_t5,_t1);\
++_mm_store_ss(fshiftptr+2,_t5);\
++_mm_storeh_pi((__m64 *)(fshiftptr),_t5);\
++}
++#else
++/* Real function for sane compilers */
 +static gmx_inline void
 +gmx_mm_update_iforce_4atom_swizzle_ps(__m128 fix1, __m128 fiy1, __m128 fiz1,
 +                                      __m128 fix2, __m128 fiy2, __m128 fiz2,
 +                                      __m128 fix3, __m128 fiy3, __m128 fiz3,
 +                                      __m128 fix4, __m128 fiy4, __m128 fiz4,
 +                                      float * gmx_restrict fptr,
 +                                      float * gmx_restrict fshiftptr)
 +{
++    __m128 t1,t2,t3,t4,t5;
++
++    fix1 = _mm_hadd_ps(fix1,fiy1);
++    fiz1 = _mm_hadd_ps(fiz1,fix2);
++    fiy2 = _mm_hadd_ps(fiy2,fiz2);
++    fix3 = _mm_hadd_ps(fix3,fiy3);
++    fiz3 = _mm_hadd_ps(fiz3,fix4);
++    fiy4 = _mm_hadd_ps(fiy4,fiz4);
++
++    fix1 = _mm_hadd_ps(fix1,fiz1); /* fix2 fiz1 fiy1 fix1 */
++    fiy2 = _mm_hadd_ps(fiy2,fix3); /* fiy3 fix3 fiz2 fiy2 */
++    fiz3 = _mm_hadd_ps(fiz3,fiy4); /* fiz4 fiy4 fix4 fiz3 */
++
++    _mm_storeu_ps(fptr,  _mm_add_ps(fix1,_mm_loadu_ps(fptr)  ));
++    _mm_storeu_ps(fptr+4,_mm_add_ps(fiy2,_mm_loadu_ps(fptr+4)));
++    _mm_storeu_ps(fptr+8,_mm_add_ps(fiz3,_mm_loadu_ps(fptr+8)));
++
++    t5 = _mm_load_ss(fshiftptr+2);
++    t5 = _mm_loadh_pi(t5,(__m64 *)(fshiftptr));
++
++    t1 = _mm_shuffle_ps(fix1,fix1,_MM_SHUFFLE(1,0,2,2));
++    t2 = _mm_shuffle_ps(fiy2,fiy2,_MM_SHUFFLE(3,2,1,1));
++    t3 = _mm_shuffle_ps(fiz3,fiz3,_MM_SHUFFLE(2,1,0,0));
++    t4 = _mm_shuffle_ps(fix1,fiy2,_MM_SHUFFLE(0,0,3,3));
++    t4 = _mm_shuffle_ps(fiz3,t4  ,_MM_SHUFFLE(2,0,3,3));
++
++    t1 = _mm_add_ps(t1,t2);
++    t3 = _mm_add_ps(t3,t4);
++    t1 = _mm_add_ps(t1,t3);
++    t5 = _mm_add_ps(t5,t1);
++
++    _mm_store_ss(fshiftptr+2,t5);
++    _mm_storeh_pi((__m64 *)(fshiftptr),t5);
 +}
-       __m128 t1,t2;
-       t1   = _mm_movehl_ps(pot2,pot1); 
-       t2   = _mm_movelh_ps(pot1,pot2); 
-       t1   = _mm_add_ps(t1,t2);       
-       t2   = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(3,3,1,1));
-       pot1 = _mm_add_ps(t1,t2);       
-       pot2 = _mm_movehl_ps(t2,pot1);
-       _mm_store_ss(ptrA,_mm_add_ss(pot1,_mm_load_ss(ptrA)));
-       _mm_store_ss(ptrB,_mm_add_ss(pot2,_mm_load_ss(ptrB)));
- }
- static gmx_inline void
- gmx_mm_update_4pot_ps(__m128 pot1, float * gmx_restrict ptrA,
-                       __m128 pot2, float * gmx_restrict ptrB,
-                       __m128 pot3, float * gmx_restrict ptrC,
-                       __m128 pot4, float * gmx_restrict ptrD)
- {
-     _MM_TRANSPOSE4_PS(pot1,pot2,pot3,pot4);
-     pot1 = _mm_add_ps(_mm_add_ps(pot1,pot2),_mm_add_ps(pot3,pot4));
-     pot2 = _mm_shuffle_ps(pot1,pot1,_MM_SHUFFLE(1,1,1,1));
-     pot3 = _mm_shuffle_ps(pot1,pot1,_MM_SHUFFLE(2,2,2,2));
-     pot4 = _mm_shuffle_ps(pot1,pot1,_MM_SHUFFLE(3,3,3,3));
-       _mm_store_ss(ptrA,_mm_add_ss(pot1,_mm_load_ss(ptrA)));
-       _mm_store_ss(ptrB,_mm_add_ss(pot2,_mm_load_ss(ptrB)));
-       _mm_store_ss(ptrC,_mm_add_ss(pot3,_mm_load_ss(ptrC)));
-       _mm_store_ss(ptrD,_mm_add_ss(pot4,_mm_load_ss(ptrD)));
++#endif
 +
 +
 +static gmx_inline void
 +gmx_mm_update_1pot_ps(__m128 pot1, float * gmx_restrict ptrA)
 +{
 +    pot1 = _mm_add_ps(pot1,_mm_movehl_ps(_mm_setzero_ps(),pot1));
 +    pot1 = _mm_add_ps(pot1,_mm_shuffle_ps(pot1,pot1,_MM_SHUFFLE(0,0,0,1)));
 +    _mm_store_ss(ptrA,_mm_add_ss(pot1,_mm_load_ss(ptrA)));
 +}
 +
 +static gmx_inline void
 +gmx_mm_update_2pot_ps(__m128 pot1, float * gmx_restrict ptrA,
 +                      __m128 pot2, float * gmx_restrict ptrB)
 +{
++    __m128 t1,t2;
++    t1   = _mm_movehl_ps(pot2,pot1);
++    t2   = _mm_movelh_ps(pot1,pot2);
++    t1   = _mm_add_ps(t1,t2);
++    t2   = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(3,3,1,1));
++    pot1 = _mm_add_ps(t1,t2);
++    pot2 = _mm_movehl_ps(t2,pot1);
++    _mm_store_ss(ptrA,_mm_add_ss(pot1,_mm_load_ss(ptrA)));
++    _mm_store_ss(ptrB,_mm_add_ss(pot2,_mm_load_ss(ptrB)));
 +}
 +
 +
 +#endif /* _kernelutil_x86_sse4_1_single_h_ */
index 70c9dde017a9be9641de8e22515d5d6a223127f6,0000000000000000000000000000000000000000..cad77484b5eacaf138049d9422083ec24ca4c989
mode 100644,000000..100644
--- /dev/null
@@@ -1,659 -1,0 +1,658 @@@
-   snew(t,x);
 +/*
 + * 
 + *                This source code is part of
 + * 
 + *                 G   R   O   M   A   C   S
 + * 
 + *          GROningen MAchine for Chemical Simulations
 + * 
 + *                        VERSION 3.2.0
 + * Written by David van der Spoel, Erik Lindahl, Berk Hess, and others.
 + * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 + * Copyright (c) 2001-2004, The GROMACS development team,
 + * check out http://www.gromacs.org for more information.
 +
 + * This program is free software; you can redistribute it and/or
 + * modify it under the terms of the GNU General Public License
 + * as published by the Free Software Foundation; either version 2
 + * of the License, or (at your option) any later version.
 + * 
 + * If you want to redistribute modifications, please consider that
 + * scientific software is very special. Version control is crucial -
 + * bugs must be traceable. We will be happy to consider code for
 + * inclusion in the official distribution, but derived work must not
 + * be called official GROMACS. Details are found in the README & COPYING
 + * files - if they are missing, get the official version at www.gromacs.org.
 + * 
 + * To help us fund GROMACS development, we humbly ask that you cite
 + * the papers on the package - you can find them in the top README file.
 + * 
 + * For more info, check our website at http://www.gromacs.org
 + * 
 + * And Hey:
 + * GROningen Mixture of Alchemy and Childrens' Stories
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <ctype.h>
 +#include "sysstuff.h"
 +#include "smalloc.h"
 +#include "string2.h"
 +#include "futil.h"
 +#include "maths.h"
 +#include "gmx_fatal.h"
 +#include "vec.h"
 +#include "macros.h"
 +#include "index.h"
 +#include "strdb.h"
 +#include "copyrite.h"
 +#include "tpxio.h"
 +#include "typedefs.h"
 +#include "statutil.h"
 +#include "oenv.h"
 +#include "gmxfio.h"
 +#include "xvgr.h"
 +#include "matio.h"
 +#include "gmx_ana.h"
 +#include "names.h"
 +#include "sfactor.h"
 +
 +
 +typedef struct gmx_structurefactors {
 +    int nratoms;
 +    int *p;        /* proton number */
 +    int *n;        /* neutron number */
 +    /* Parameters for the Cromer Mann fit */
 +    real **a;    /* parameter a */
 +    real **b;    /* parameter b */
 +    real *c;       /* parameter c */
 +    char **atomnm;  /* atomname */
 +
 +} gmx_structurefactors;
 +
 +typedef struct reduced_atom{
 +    rvec x;
 +    int  t;
 +} reduced_atom;
 +
 +
 +typedef struct structure_factor
 +{
 +  int     n_angles;
 +  int     n_groups;
 +  double  lambda;
 +  double  energy;
 +  double  momentum;
 +  double  ref_k;
 +  double  **F;
 +  int     nSteps;
 +  int     total_n_atoms;
 +} structure_factor;
 +
 +
 +extern int * create_indexed_atom_type (reduced_atom_t * atm, int size)
 +{
 +/*
 + * create an index of the atom types found in a  group
 + * i.e.: for water index_atp[0]=type_number_of_O and
 + *                 index_atp[1]=type_number_of_H
 + *
 + * the last element is set to 0
 + */
 +    int *index_atp, i, i_tmp, j;
 +
 +    reduced_atom *att=(reduced_atom *)atm;
 +
 +    snew (index_atp, 1);
 +    i_tmp = 1;
 +    index_atp[0] = att[0].t;
 +    for (i = 1; i < size; i++) {
 +      for (j = 0; j < i_tmp; j++)
 +          if (att[i].t == index_atp[j])
 +              break;
 +      if (j == i_tmp) {       /* i.e. no indexed atom type is  == to atm[i].t */
 +          i_tmp++;
 +          srenew (index_atp, i_tmp * sizeof (int));
 +          index_atp[i_tmp - 1] = att[i].t;
 +      }
 +    }
 +    i_tmp++;
 +    srenew (index_atp, i_tmp * sizeof (int));
 +    index_atp[i_tmp - 1] = 0;
 +    return index_atp;
 +}
 +
 +
 +
 +extern t_complex *** rc_tensor_allocation(int x, int y, int z)
 +{
 +  t_complex ***t;
 +  int i,j;
 +
 +  t = (t_complex ***)calloc(x,sizeof(t_complex**));
 +  if(!t) exit(fprintf(stderr,"\nallocation error"));
 +  t[0] = (t_complex **)calloc(x*y,sizeof(t_complex*));
 +  if(!t[0]) exit(fprintf(stderr,"\nallocation error"));
 +  t[0][0] = (t_complex *)calloc(x*y*z,sizeof(t_complex));
 +  if(!t[0][0]) exit(fprintf(stderr,"\nallocation error"));
 +
 +  for( j = 1 ; j < y ; j++)
 +    t[0][j] = t[0][j-1] + z;
 +  for( i = 1 ; i < x ; i++) {
 +    t[i] = t[i-1] + y;
 +    t[i][0] = t[i-1][0] + y*z;
 +    for( j = 1 ; j < y ; j++)
 +      t[i][j] = t[i][j-1] + z;
 +  }
 +  return t;
 +}
 +
 +
 +extern void compute_structure_factor (structure_factor_t * sft, matrix box,
 +                             reduced_atom_t * red, int isize, real start_q,
 +                             real end_q, int group,real **sf_table)
 +{
 +    structure_factor *sf=(structure_factor *)sft;
 +    reduced_atom *redt=(reduced_atom *)red;
 +
 +    t_complex ***tmpSF;
 +    rvec k_factor;
 +    real kdotx, asf, kx, ky, kz, krr;
 +    int kr, maxkx, maxky, maxkz, i, j, k, p, *counter;
 +
 +
 +    k_factor[XX] = 2 * M_PI / box[XX][XX];
 +    k_factor[YY] = 2 * M_PI / box[YY][YY];
 +    k_factor[ZZ] = 2 * M_PI / box[ZZ][ZZ];
 +
 +    maxkx = (int) (end_q / k_factor[XX] + 0.5);
 +    maxky = (int) (end_q / k_factor[YY] + 0.5);
 +    maxkz = (int) (end_q / k_factor[ZZ] + 0.5);
 +
 +    snew (counter, sf->n_angles);
 +
 +    tmpSF = rc_tensor_allocation(maxkx,maxky,maxkz);
 +/*
 + * The big loop...
 + * compute real and imaginary part of the structure factor for every
 + * (kx,ky,kz))
 + */
 +    fprintf(stderr,"\n");
 +    for (i = 0; i < maxkx; i++) {
 +      fprintf (stderr,"\rdone %3.1f%%     ", (double)(100.0*(i+1))/maxkx);
 +      kx = i * k_factor[XX];
 +      for (j = 0; j < maxky; j++) {
 +          ky = j * k_factor[YY];
 +          for (k = 0; k < maxkz; k++)
 +              if (i != 0 || j != 0 || k != 0) {
 +                  kz = k * k_factor[ZZ];
 +                  krr = sqrt (sqr (kx) + sqr (ky) + sqr (kz));
 +                  if (krr >= start_q && krr <= end_q) {
 +                      kr = (int) (krr/sf->ref_k + 0.5);
 +                      if (kr < sf->n_angles) {
 +                          counter[kr]++;  /* will be used for the copmutation
 +                                             of the average*/
 +                          for (p = 0; p < isize; p++) {
 +                              asf = sf_table[redt[p].t][kr];
 +
 +                              kdotx = kx * redt[p].x[XX] +
 +                                  ky * redt[p].x[YY] + kz * redt[p].x[ZZ];
 +
 +                              tmpSF[i][j][k].re += cos (kdotx) * asf;
 +                              tmpSF[i][j][k].im += sin (kdotx) * asf;
 +                          }
 +                      }
 +                  }
 +              }
 +      }
 +    }                         /* end loop on i */
 +/*
 + *  compute the square modulus of the structure factor, averaging on the surface
 + *  kx*kx + ky*ky + kz*kz = krr*krr
 + *  note that this is correct only for a (on the macroscopic scale)
 + *  isotropic system.
 + */
 +    for (i = 0; i < maxkx; i++) {
 +      kx = i * k_factor[XX]; for (j = 0; j < maxky; j++) {
 +          ky = j * k_factor[YY]; for (k = 0; k < maxkz; k++) {
 +              kz = k * k_factor[ZZ]; krr = sqrt (sqr (kx) + sqr (ky)
 +              + sqr (kz)); if (krr >= start_q && krr <= end_q) {
 +                  kr = (int) (krr / sf->ref_k + 0.5);
 +                      if (kr < sf->n_angles && counter[kr] != 0)
 +                              sf->F[group][kr] +=
 +                          (sqr (tmpSF[i][j][k].re) +
 +                           sqr (tmpSF[i][j][k].im))/ counter[kr];
 +              }
 +          }
 +      }
 +    } sfree (counter); free(tmpSF[0][0]); free(tmpSF[0]); free(tmpSF);
 +}
 +
 +
 +extern gmx_structurefactors_t *gmx_structurefactors_init(const char *datfn) {
 +    
 +      /* Read the database for the structure factor of the different atoms */
 +
 +    FILE *fp;
 +    char line[STRLEN];
 +    gmx_structurefactors *gsf;
 +    double a1,a2,a3,a4,b1,b2,b3,b4,c;
 +    int p;
 +    int i;
 +    int nralloc=10;
 +    int line_no;
 +    char atomn[32];
 +    fp=libopen(datfn);
 +    line_no = 0;
 +    snew(gsf,1);
 +
 +    snew(gsf->atomnm,nralloc);
 +    snew(gsf->a,nralloc);
 +    snew(gsf->b,nralloc);
 +    snew(gsf->c,nralloc);
 +    snew(gsf->p,nralloc);
 +    gsf->n=NULL;
 +    gsf->nratoms=line_no;
 +    while(get_a_line(fp,line,STRLEN)) {
 +        i=line_no;
 +        if (sscanf(line,"%s %d %lf %lf %lf %lf %lf %lf %lf %lf %lf",
 +                   atomn,&p,&a1,&a2,&a3,&a4,&b1,&b2,&b3,&b4,&c) == 11) {
 +            gsf->atomnm[i]=strdup(atomn);
 +            gsf->p[i]=p;
 +            snew(gsf->a[i],4);
 +            snew(gsf->b[i],4);
 +            gsf->a[i][0]=a1;
 +            gsf->a[i][1]=a2;
 +            gsf->a[i][2]=a3;
 +            gsf->a[i][3]=a4;
 +            gsf->b[i][0]=b1;
 +            gsf->b[i][1]=b2;
 +            gsf->b[i][2]=b3;
 +            gsf->b[i][3]=b4;
 +            gsf->c[i]=c;
 +            line_no++;            
 +            gsf->nratoms=line_no;
 +            if (line_no==nralloc){
 +              nralloc+=10;
 +                srenew(gsf->atomnm,nralloc);
 +                srenew(gsf->a,nralloc);
 +                srenew(gsf->b,nralloc);
 +                srenew(gsf->c,nralloc);
 +                srenew(gsf->p,nralloc);
 +            }
 +        }
 +        else 
 +            fprintf(stderr,"WARNING: Error in file %s at line %d ignored\n",
 +                    datfn,line_no);
 +    }
 +
 +    srenew(gsf->atomnm,gsf->nratoms);
 +    srenew(gsf->a,gsf->nratoms);
 +    srenew(gsf->b,gsf->nratoms);
 +    srenew(gsf->c,gsf->nratoms);
 +    srenew(gsf->p,gsf->nratoms);
 +
 +    fclose(fp);
 +
 +    return (gmx_structurefactors_t *) gsf;
 +
 +}
 +
 +
 +extern void rearrange_atoms (reduced_atom_t * positions, t_trxframe *fr, atom_id * index,
 +                    int isize, t_topology * top, gmx_bool flag,gmx_structurefactors_t *gsf)
 +/* given the group's index, return the (continuous) array of atoms */
 +{
 +  int i;
 +
 +  reduced_atom *pos=(reduced_atom *)positions;
 +
 +  if (flag)
 +    for (i = 0; i < isize; i++)
 +      pos[i].t =
 +      return_atom_type (*(top->atoms.atomname[index[i]]),gsf);
 +  for (i = 0; i < isize; i++)
 +    copy_rvec (fr->x[index[i]], pos[i].x);
 +
 +   positions=(reduced_atom_t *)pos;
 +}
 +
 +
 +extern int return_atom_type (const char *name,gmx_structurefactors_t *gsf)
 +{
 +  typedef struct {
 +    const char *name;
 +    int  nh;
 +  } t_united_h;
 +  t_united_h uh[] = {
 +    { "CH1", 1 }, { "CH2", 2 }, { "CH3", 3 },
 +    { "CS1", 1 }, { "CS2", 2 }, { "CS3", 3 },
 +    { "CP1", 1 }, { "CP2", 2 }, { "CP3", 3 }
 +  };
 +  int i,cnt=0;
 +  int *tndx;
 +  int nrc;
 +  int fndx=0;
 +  int NCMT;
 +
 +  gmx_structurefactors *gsft=(gmx_structurefactors *)gsf;
 +
 +  NCMT=gsft->nratoms;
 +
 +  snew(tndx,NCMT);
 +
 +  for(i=0; (i<asize(uh)); i++)
 +    if (strcmp(name,uh[i].name) == 0)
 +      return NCMT-1+uh[i].nh;
 +
 +  for(i=0; (i<NCMT); i++){
 +    if (strncmp (name, gsft->atomnm[i],strlen(gsft->atomnm[i])) == 0){
 +      tndx[cnt]=i;
 +      cnt++;
 +    }
 +  }
 +
 +  if (cnt==0)
 +  gmx_fatal(FARGS,"\nError: atom (%s) not in list (%d types checked)!\n",
 +          name,i);
 +  else{
 +        nrc=0;
 +        for(i=0;i<cnt;i++){
 +                if(strlen(gsft->atomnm[tndx[i]])>(size_t)nrc){
 +                        nrc=strlen(gsft->atomnm[tndx[i]]);
 +                    fndx=tndx[i];
 +                }
 +        }
 +     
 +        return fndx;
 +  }
 +
 +  return 0;
 +}
 +
 +extern int gmx_structurefactors_get_sf(gmx_structurefactors_t *gsf, int elem, real a[4], real b[4], real *c){
 +
 +      int success;
 +      int i;
 +    gmx_structurefactors *gsft=(gmx_structurefactors *)gsf;
 +      success=0;
 +
 +      for(i=0;i<4;i++){
 +            a[i]=gsft->a[elem][i];
 +            b[i]=gsft->b[elem][i];
 +            *c=gsft->c[elem];
 +        }
 +
 +        success+=1;
 +      return success;
 +}
 +
 +extern int do_scattering_intensity (const char* fnTPS, const char* fnNDX,
 +                             const char* fnXVG, const char *fnTRX,
 +                             const char* fnDAT,
 +                             real start_q,real end_q,
 +                             real energy,int ng,const output_env_t oenv)
 +{
 +    int i,*isize,flags = TRX_READ_X,**index_atp;
 +    t_trxstatus *status;
 +    char **grpname,title[STRLEN];
 +    atom_id **index;
 +    t_topology top;
 +    int ePBC;
 +    t_trxframe fr;
 +    reduced_atom_t **red;
 +    structure_factor *sf;
 +    rvec *xtop;
 +    real **sf_table;
 +    int nsftable;
 +    matrix box;
 +    double r_tmp;
 +
 +    gmx_structurefactors_t *gmx_sf;
 +    real *a,*b,c;
 +    int success;
 +
 +    snew(a,4);
 +    snew(b,4);
 +
 +
 +    gmx_sf=gmx_structurefactors_init(fnDAT);
 +
 +    success=gmx_structurefactors_get_sf(gmx_sf,0, a, b, &c);
 +
 +    snew (sf, 1);
 +    sf->energy = energy;
 +
 +    /* Read the topology informations */
 +    read_tps_conf (fnTPS, title, &top, &ePBC, &xtop, NULL, box, TRUE);
 +    sfree (xtop);
 +
 +    /* groups stuff... */
 +    snew (isize, ng);
 +    snew (index, ng);
 +    snew (grpname, ng);
 +
 +    fprintf (stderr, "\nSelect %d group%s\n", ng,
 +           ng == 1 ? "" : "s");
 +    if (fnTPS)
 +      get_index (&top.atoms, fnNDX, ng, isize, index, grpname);
 +    else
 +      rd_index (fnNDX, ng, isize, index, grpname);
 +
 +    /* The first time we read data is a little special */
 +    read_first_frame (oenv,&status, fnTRX, &fr, flags);
 +
 +    sf->total_n_atoms = fr.natoms;
 +
 +    snew (red, ng);
 +    snew (index_atp, ng);
 +
 +    r_tmp = max (box[XX][XX], box[YY][YY]);
 +    r_tmp = (double) max (box[ZZ][ZZ], r_tmp);
 +
 +    sf->ref_k = (2.0 * M_PI) / (r_tmp);
 +    /* ref_k will be the reference momentum unit */
 +    sf->n_angles = (int) (end_q / sf->ref_k + 0.5);
 +
 +    snew (sf->F, ng);
 +    for (i = 0; i < ng; i++)
 +      snew (sf->F[i], sf->n_angles);
 +    for (i = 0; i < ng; i++) {
 +      snew (red[i], isize[i]);
 +      rearrange_atoms (red[i], &fr, index[i], isize[i], &top, TRUE,gmx_sf);
 +      index_atp[i] = create_indexed_atom_type (red[i], isize[i]);
 +    }
 +
 +    sf_table = compute_scattering_factor_table (gmx_sf,(structure_factor_t *)sf,&nsftable);
 +
 +
 +    /* This is the main loop over frames */
 +
 +    do {
 +      sf->nSteps++;
 +      for (i = 0; i < ng; i++) {
 +          rearrange_atoms (red[i], &fr, index[i], isize[i], &top,FALSE,gmx_sf);
 +
 +          compute_structure_factor ((structure_factor_t *)sf, box, red[i], isize[i],
 +                                    start_q, end_q, i, sf_table);
 +      }
 +    }
 +
 +    while (read_next_frame (oenv,status, &fr));
 +
 +    save_data ((structure_factor_t *)sf, fnXVG, ng, start_q, end_q,oenv);
 +
 +
 +    sfree(a);
 +    sfree(b);
 +
 +    gmx_structurefactors_done(gmx_sf);
 +
 +    return 0;
 +}
 +
 +
 +extern void save_data (structure_factor_t *sft, const char *file, int ngrps,
 +                real start_q, real end_q, const output_env_t oenv)
 +{
 +
 +    FILE *fp;
 +    int i, g = 0;
 +    double *tmp, polarization_factor, A;
 +
 +    structure_factor *sf=(structure_factor *)sft;
 +    
 +    fp = xvgropen (file, "Scattering Intensity", "q (1/nm)",
 +                 "Intensity (a.u.)",oenv);
 +
 +    snew (tmp, ngrps);
 +
 +    for (g = 0; g < ngrps; g++)
 +      for (i = 0; i < sf->n_angles; i++) {
 +            
 +/*
 + *          theta is half the angle between incoming and scattered vectors.
 + *
 + *          polar. fact. = 0.5*(1+cos^2(2*theta)) = 1 - 0.5 * sin^2(2*theta)
 + *
 + *          sin(theta) = q/(2k) := A  ->  sin^2(theta) = 4*A^2 (1-A^2) ->
 + *          -> 0.5*(1+cos^2(2*theta)) = 1 - 2 A^2 (1-A^2)
 + */
 +          A = (double) (i * sf->ref_k) / (2.0 * sf->momentum);
 +          polarization_factor = 1 - 2.0 * sqr (A) * (1 - sqr (A));
 +          sf->F[g][i] *= polarization_factor;
 +      }
 +    for (i = 0; i < sf->n_angles; i++) {
 +      if (i * sf->ref_k >= start_q && i * sf->ref_k <= end_q) {
 +          fprintf (fp, "%10.5f  ", i * sf->ref_k);
 +          for (g = 0; g < ngrps; g++)
 +               fprintf (fp, "  %10.5f ", (sf->F[g][i]) /( sf->total_n_atoms*
 +                                                        sf->nSteps));
 +          fprintf (fp, "\n");
 +      }
 +    }
 +
 +    ffclose (fp);
 +}
 +
 +
 +extern double CMSF (gmx_structurefactors_t *gsf,int type,int nh,double lambda, double sin_theta)
 +/*
 + * return Cromer-Mann fit for the atomic scattering factor:
 + * sin_theta is the sine of half the angle between incoming and scattered
 + * vectors. See g_sq.h for a short description of CM fit.
 + */
 +{
 +  int i,success;
 +  double tmp = 0.0, k2;
 +  real *a,*b;
 +  real c;
 +
 +  snew(a,4);
 +  snew(b,4);
 +
 + /*
 +  *
 +  * f0[k] = c + [SUM a_i*EXP(-b_i*(k^2)) ]
 +  *             i=1,4
 +  */
 +
 +  /*
 +   *  united atoms case
 +   *  CH2 / CH3 groups
 +   */
 +  if (nh > 0) {
 +    tmp = (CMSF (gsf,return_atom_type ("C",gsf),0,lambda, sin_theta) +
 +         nh*CMSF (gsf,return_atom_type ("H",gsf),0,lambda, sin_theta));
 +  }
 +  /* all atom case */
 +  else {
 +    k2 = (sqr (sin_theta) / sqr (10.0 * lambda));
 +    success=gmx_structurefactors_get_sf(gsf,type,a,b,&c);
 +    tmp = c;
 +    for (i = 0; (i < 4); i++)
 +      tmp += a[i] * exp (-b[i] * k2);
 +  }
 +  return tmp;
 +}
 +
 +
 +
 +extern real **gmx_structurefactors_table(gmx_structurefactors_t *gsf,real momentum, real ref_k, real lambda, int n_angles){
 +
 +      int NCMT;
 +      int nsftable;
 +      int i,j;
 +      double q,sin_theta;
 +    real **sf_table;
 +    gmx_structurefactors *gsft=(gmx_structurefactors *)gsf;
 +
 +    NCMT=gsft->nratoms;
 +      nsftable = NCMT+3;
 +
 +    snew (sf_table,nsftable);
 +    for (i = 0; (i < nsftable); i++) {
 +      snew (sf_table[i], n_angles);
 +      for (j = 0; j < n_angles; j++) {
 +              q = ((double) j * ref_k);
 +              /* theta is half the angle between incoming
 +                         and scattered wavevectors. */
 +              sin_theta = q / (2.0 * momentum);
 +              if (i < NCMT){
 +                    sf_table[i][j] = CMSF (gsf,i,0,lambda, sin_theta);
 +                }
 +                else
 +                      sf_table[i][j] = CMSF (gsf,i,i-NCMT+1,lambda, sin_theta);
 +      }
 +    }
 +    return sf_table;
 +}
 +
 +extern void gmx_structurefactors_done(gmx_structurefactors_t *gsf){
 +
 +      int i;
 +      gmx_structurefactors *sf;
 +      sf=(gmx_structurefactors *) gsf;
 +
 +      for(i=0;i<sf->nratoms;i++){
 +        sfree(sf->a[i]);
 +              sfree(sf->b[i]);
 +              sfree(sf->atomnm[i]);
 +      }
 +
 +      sfree(sf->a);
 +      sfree(sf->b);
 +      sfree(sf->atomnm);
 +      sfree(sf->p);
 +      sfree(sf->c);
 +
 +      sfree(sf);
 +
 +}
 +
 +extern real **compute_scattering_factor_table (gmx_structurefactors_t *gsf,structure_factor_t *sft,int *nsftable)
 +{
 +/*
 + *  this function build up a table of scattering factors for every atom
 + *  type and for every scattering angle.
 + */
 +   
 +    double hc=1239.842;
 +    real ** sf_table;
 +
 +    structure_factor *sf=(structure_factor *)sft;
 +
 +
 +    /* \hbar \omega \lambda = hc = 1239.842 eV * nm */
 +    sf->momentum = ((double) (2. * 1000.0 * M_PI * sf->energy) / hc);
 +    sf->lambda = hc / (1000.0 * sf->energy);
 +    fprintf (stderr, "\nwavelenght = %f nm\n", sf->lambda);
 +
 +    sf_table=gmx_structurefactors_table(gsf,sf->momentum,sf->ref_k,sf->lambda,sf->n_angles);
 +
 +    return sf_table;
 +}
 +
 +
Simple merge
index 46d7c47f2fc508e7fd35ca1bf68f3ed94b242120,0000000000000000000000000000000000000000..da20b6700cdffbc1eea3dc902f393f1119f5060e
mode 100644,000000..100644
--- /dev/null
@@@ -1,3458 -1,0 +1,3458 @@@
-   RTYPE ("rlist",     ir->rlist,      -1);
 +/* -*- mode: c; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4; c-file-style: "stroustrup"; -*-
 + *
 + * 
 + *                This source code is part of
 + * 
 + *                 G   R   O   M   A   C   S
 + * 
 + *          GROningen MAchine for Chemical Simulations
 + * 
 + *                        VERSION 3.2.0
 + * Written by David van der Spoel, Erik Lindahl, Berk Hess, and others.
 + * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 + * Copyright (c) 2001-2004, The GROMACS development team,
 + * check out http://www.gromacs.org for more information.
 +
 + * This program is free software; you can redistribute it and/or
 + * modify it under the terms of the GNU General Public License
 + * as published by the Free Software Foundation; either version 2
 + * of the License, or (at your option) any later version.
 + * 
 + * If you want to redistribute modifications, please consider that
 + * scientific software is very special. Version control is crucial -
 + * bugs must be traceable. We will be happy to consider code for
 + * inclusion in the official distribution, but derived work must not
 + * be called official GROMACS. Details are found in the README & COPYING
 + * files - if they are missing, get the official version at www.gromacs.org.
 + * 
 + * To help us fund GROMACS development, we humbly ask that you cite
 + * the papers on the package - you can find them in the top README file.
 + * 
 + * For more info, check our website at http://www.gromacs.org
 + * 
 + * And Hey:
 + * Gallium Rubidium Oxygen Manganese Argon Carbon Silicon
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <ctype.h>
 +#include <stdlib.h>
 +#include <limits.h>
 +#include "sysstuff.h"
 +#include "smalloc.h"
 +#include "typedefs.h"
 +#include "physics.h"
 +#include "names.h"
 +#include "gmx_fatal.h"
 +#include "macros.h"
 +#include "index.h"
 +#include "symtab.h"
 +#include "string2.h"
 +#include "readinp.h"
 +#include "warninp.h"
 +#include "readir.h" 
 +#include "toputil.h"
 +#include "index.h"
 +#include "network.h"
 +#include "vec.h"
 +#include "pbc.h"
 +#include "mtop_util.h"
 +#include "chargegroup.h"
 +#include "inputrec.h"
 +
 +#define MAXPTR 254
 +#define NOGID  255
 +#define MAXLAMBDAS 1024
 +
 +/* Resource parameters 
 + * Do not change any of these until you read the instruction
 + * in readinp.h. Some cpp's do not take spaces after the backslash
 + * (like the c-shell), which will give you a very weird compiler
 + * message.
 + */
 +
 +static char tcgrps[STRLEN],tau_t[STRLEN],ref_t[STRLEN],
 +  acc[STRLEN],accgrps[STRLEN],freeze[STRLEN],frdim[STRLEN],
 +  energy[STRLEN],user1[STRLEN],user2[STRLEN],vcm[STRLEN],xtc_grps[STRLEN],
 +  couple_moltype[STRLEN],orirefitgrp[STRLEN],egptable[STRLEN],egpexcl[STRLEN],
 +  wall_atomtype[STRLEN],wall_density[STRLEN],deform[STRLEN],QMMM[STRLEN];
 +static char fep_lambda[efptNR][STRLEN];
 +static char lambda_weights[STRLEN];
 +static char **pull_grp;
 +static char **rot_grp;
 +static char anneal[STRLEN],anneal_npoints[STRLEN],
 +  anneal_time[STRLEN],anneal_temp[STRLEN];
 +static char QMmethod[STRLEN],QMbasis[STRLEN],QMcharge[STRLEN],QMmult[STRLEN],
 +  bSH[STRLEN],CASorbitals[STRLEN], CASelectrons[STRLEN],SAon[STRLEN],
 +  SAoff[STRLEN],SAsteps[STRLEN],bTS[STRLEN],bOPT[STRLEN]; 
 +static char efield_x[STRLEN],efield_xt[STRLEN],efield_y[STRLEN],
 +  efield_yt[STRLEN],efield_z[STRLEN],efield_zt[STRLEN];
 +
 +enum {
 +    egrptpALL,         /* All particles have to be a member of a group.     */
 +    egrptpALL_GENREST, /* A rest group with name is generated for particles *
 +                        * that are not part of any group.                   */
 +    egrptpPART,        /* As egrptpALL_GENREST, but no name is generated    *
 +                        * for the rest group.                               */
 +    egrptpONE          /* Merge all selected groups into one group,         *
 +                        * make a rest group for the remaining particles.    */
 +};
 +
 +
 +void init_ir(t_inputrec *ir, t_gromppopts *opts)
 +{
 +  snew(opts->include,STRLEN); 
 +  snew(opts->define,STRLEN);
 +  snew(ir->fepvals,1);
 +  snew(ir->expandedvals,1);
 +  snew(ir->simtempvals,1);
 +}
 +
 +static void GetSimTemps(int ntemps, t_simtemp *simtemp, double *temperature_lambdas)
 +{
 +
 +    int i;
 +
 +    for (i=0;i<ntemps;i++)
 +    {
 +        /* simple linear scaling -- allows more control */
 +        if (simtemp->eSimTempScale == esimtempLINEAR)
 +        {
 +            simtemp->temperatures[i] = simtemp->simtemp_low + (simtemp->simtemp_high-simtemp->simtemp_low)*temperature_lambdas[i];
 +        }
 +        else if (simtemp->eSimTempScale == esimtempGEOMETRIC)  /* should give roughly equal acceptance for constant heat capacity . . . */
 +        {
 +            simtemp->temperatures[i] = simtemp->simtemp_low * pow(simtemp->simtemp_high/simtemp->simtemp_low,(1.0*i)/(ntemps-1));
 +        }
 +        else if (simtemp->eSimTempScale == esimtempEXPONENTIAL)
 +        {
 +            simtemp->temperatures[i] = simtemp->simtemp_low + (simtemp->simtemp_high-simtemp->simtemp_low)*((exp(temperature_lambdas[i])-1)/(exp(1.0)-1));
 +        }
 +        else
 +        {
 +            char errorstr[128];
 +            sprintf(errorstr,"eSimTempScale=%d not defined",simtemp->eSimTempScale);
 +            gmx_fatal(FARGS,errorstr);
 +        }
 +    }
 +}
 +
 +
 +
 +static void _low_check(gmx_bool b,char *s,warninp_t wi)
 +{
 +    if (b)
 +    {
 +        warning_error(wi,s);
 +    }
 +}
 +
 +static void check_nst(const char *desc_nst,int nst,
 +                      const char *desc_p,int *p,
 +                      warninp_t wi)
 +{
 +    char buf[STRLEN];
 +
 +    if (*p > 0 && *p % nst != 0)
 +    {
 +        /* Round up to the next multiple of nst */
 +        *p = ((*p)/nst + 1)*nst;
 +        sprintf(buf,"%s should be a multiple of %s, changing %s to %d\n",
 +              desc_p,desc_nst,desc_p,*p);
 +        warning(wi,buf);
 +    }
 +}
 +
 +static gmx_bool ir_NVE(const t_inputrec *ir)
 +{
 +    return ((ir->eI == eiMD || EI_VV(ir->eI)) && ir->etc == etcNO);
 +}
 +
 +static int lcd(int n1,int n2)
 +{
 +    int d,i;
 +    
 +    d = 1;
 +    for(i=2; (i<=n1 && i<=n2); i++)
 +    {
 +        if (n1 % i == 0 && n2 % i == 0)
 +        {
 +            d = i;
 +        }
 +    }
 +    
 +  return d;
 +}
 +
 +static void process_interaction_modifier(const t_inputrec *ir,int *eintmod)
 +{
 +    if (*eintmod == eintmodPOTSHIFT_VERLET)
 +    {
 +        if (ir->cutoff_scheme == ecutsVERLET)
 +        {
 +            *eintmod = eintmodPOTSHIFT;
 +        }
 +        else
 +        {
 +            *eintmod = eintmodNONE;
 +        }
 +    }
 +}
 +
 +void check_ir(const char *mdparin,t_inputrec *ir, t_gromppopts *opts,
 +              warninp_t wi)
 +/* Check internal consistency */
 +{
 +    /* Strange macro: first one fills the err_buf, and then one can check 
 +     * the condition, which will print the message and increase the error
 +     * counter.
 +     */
 +#define CHECK(b) _low_check(b,err_buf,wi)
 +    char err_buf[256],warn_buf[STRLEN];
 +    int i,j;
 +    int  ns_type=0;
 +    real dt_coupl=0;
 +    real dt_pcoupl;
 +    int  nstcmin;
 +    t_lambda *fep = ir->fepvals;
 +    t_expanded *expand = ir->expandedvals;
 +
 +  set_warning_line(wi,mdparin,-1);
 +
 +    /* BASIC CUT-OFF STUFF */
 +    if (ir->rcoulomb < 0)
 +    {
 +        warning_error(wi,"rcoulomb should be >= 0");
 +    }
 +    if (ir->rvdw < 0)
 +    {
 +        warning_error(wi,"rvdw should be >= 0");
 +    }
 +    if (ir->rlist < 0 &&
 +        !(ir->cutoff_scheme == ecutsVERLET && ir->verletbuf_drift > 0))
 +    {
 +        warning_error(wi,"rlist should be >= 0");
 +    }
 +
 +    process_interaction_modifier(ir,&ir->coulomb_modifier);
 +    process_interaction_modifier(ir,&ir->vdw_modifier);
 +
 +    if (ir->cutoff_scheme == ecutsGROUP)
 +    {
 +        /* BASIC CUT-OFF STUFF */
 +        if (ir->rlist == 0 ||
 +            !((EEL_MIGHT_BE_ZERO_AT_CUTOFF(ir->coulombtype) && ir->rcoulomb > ir->rlist) ||
 +              (EVDW_MIGHT_BE_ZERO_AT_CUTOFF(ir->vdwtype)    && ir->rvdw     > ir->rlist))) {
 +            /* No switched potential and/or no twin-range:
 +             * we can set the long-range cut-off to the maximum of the other cut-offs.
 +             */
 +            ir->rlistlong = max_cutoff(ir->rlist,max_cutoff(ir->rvdw,ir->rcoulomb));
 +        }
 +        else if (ir->rlistlong < 0)
 +        {
 +            ir->rlistlong = max_cutoff(ir->rlist,max_cutoff(ir->rvdw,ir->rcoulomb));
 +            sprintf(warn_buf,"rlistlong was not set, setting it to %g (no buffer)",
 +                    ir->rlistlong);
 +            warning(wi,warn_buf);
 +        }
 +        if (ir->rlistlong == 0 && ir->ePBC != epbcNONE)
 +        {
 +            warning_error(wi,"Can not have an infinite cut-off with PBC");
 +        }
 +        if (ir->rlistlong > 0 && (ir->rlist == 0 || ir->rlistlong < ir->rlist))
 +        {
 +            warning_error(wi,"rlistlong can not be shorter than rlist");
 +        }
 +        if (IR_TWINRANGE(*ir) && ir->nstlist <= 0)
 +        {
 +            warning_error(wi,"Can not have nstlist<=0 with twin-range interactions");
 +        }
 +    }
 +    
 +    if(ir->rlistlong == ir->rlist)
 +    {
 +        ir->nstcalclr = 0;
 +    }
 +    else if(ir->rlistlong>ir->rlist && ir->nstcalclr==0)
 +    {
 +        warning_error(wi,"With different cutoffs for electrostatics and VdW, nstcalclr must be -1 or a positive number");
 +    }
 +    
 +    if (ir->cutoff_scheme == ecutsVERLET)
 +    {
 +        real rc_max;
 +
 +        /* Normal Verlet type neighbor-list, currently only limited feature support */
 +        if (inputrec2nboundeddim(ir) < 3)
 +        {
 +            warning_error(wi,"With Verlet lists only full pbc or pbc=xy with walls is supported");
 +        }
 +        if (ir->rcoulomb != ir->rvdw)
 +        {
 +            warning_error(wi,"With Verlet lists rcoulomb!=rvdw is not supported");
 +        }
 +        if (ir->vdwtype != evdwCUT)
 +        {
 +            warning_error(wi,"With Verlet lists only cut-off LJ interactions are supported");
 +        }
 +        if (!(ir->coulombtype == eelCUT ||
 +              (EEL_RF(ir->coulombtype) && ir->coulombtype != eelRF_NEC) ||
 +              EEL_PME(ir->coulombtype) || ir->coulombtype == eelEWALD))
 +        {
 +            warning_error(wi,"With Verlet lists only cut-off, reaction-field, PME and Ewald electrostatics are supported");
 +        }
 +
 +        if (ir->nstlist <= 0)
 +        {
 +             warning_error(wi,"With Verlet lists nstlist should be larger than 0");
 +        }
 +
 +        if (ir->nstlist < 10)
 +        {
 +            warning_note(wi,"With Verlet lists the optimal nstlist is >= 10, with GPUs >= 20. Note that with the Verlet scheme, nstlist has no effect on the accuracy of your simulation.");
 +        }
 +
 +        rc_max = max(ir->rvdw,ir->rcoulomb);
 +
 +        if (ir->verletbuf_drift <= 0)
 +        {
 +            if (ir->verletbuf_drift == 0)
 +            {
 +                warning_error(wi,"Can not have an energy drift of exactly 0");
 +            }
 +
 +            if (ir->rlist < rc_max)
 +            {
 +                warning_error(wi,"With verlet lists rlist can not be smaller than rvdw or rcoulomb");
 +            }
 +            
 +            if (ir->rlist == rc_max && ir->nstlist > 1)
 +            {
 +                warning_note(wi,"rlist is equal to rvdw and/or rcoulomb: there is no explicit Verlet buffer. The cluster pair list does have a buffering effect, but choosing a larger rlist might be necessary for good energy conservation.");
 +            }
 +        }
 +        else
 +        {
 +            if (ir->rlist > rc_max)
 +            {
 +                warning_note(wi,"You have set rlist larger than the interaction cut-off, but you also have verlet-buffer-drift > 0. Will set rlist using verlet-buffer-drift.");
 +            }
 +
 +            if (ir->nstlist == 1)
 +            {
 +                /* No buffer required */
 +                ir->rlist = rc_max;
 +            }
 +            else
 +            {
 +                if (EI_DYNAMICS(ir->eI))
 +                {
 +                    if (EI_MD(ir->eI) && ir->etc == etcNO)
 +                    {
 +                        warning_error(wi,"Temperature coupling is required for calculating rlist using the energy drift with verlet-buffer-drift > 0. Either use temperature coupling or set rlist yourself together with verlet-buffer-drift = -1."); 
 +                    }
 +
 +                    if (inputrec2nboundeddim(ir) < 3)
 +                    {
 +                        warning_error(wi,"The box volume is required for calculating rlist from the energy drift with verlet-buffer-drift > 0. You are using at least one unbounded dimension, so no volume can be computed. Either use a finite box, or set rlist yourself together with verlet-buffer-drift = -1.");
 +                    }
 +                    /* Set rlist temporarily so we can continue processing */
 +                    ir->rlist = rc_max;
 +                }
 +                else
 +                {
 +                    /* Set the buffer to 5% of the cut-off */
 +                    ir->rlist = 1.05*rc_max;
 +                }
 +            }
 +        }
 +
 +        /* No twin-range calculations with Verlet lists */
 +        ir->rlistlong = ir->rlist;
 +    }
 +
 +    if(ir->nstcalclr==-1)
 +    {
 +        /* if rlist=rlistlong, this will later be changed to nstcalclr=0 */
 +        ir->nstcalclr = ir->nstlist;
 +    }
 +    else if(ir->nstcalclr>0)
 +    {
 +        if(ir->nstlist>0 && (ir->nstlist % ir->nstcalclr != 0))
 +        {
 +            warning_error(wi,"nstlist must be evenly divisible by nstcalclr. Use nstcalclr = -1 to automatically follow nstlist");
 +        }
 +    }
 +    else if(ir->nstcalclr<-1)
 +    {
 +        warning_error(wi,"nstcalclr must be a positive number (divisor of nstcalclr), or -1 to follow nstlist.");
 +    }
 +    
 +    if(EEL_PME(ir->coulombtype) && ir->rcoulomb > ir->rvdw && ir->nstcalclr>1)
 +    {
 +        warning_error(wi,"When used with PME, the long-range component of twin-range interactions must be updated every step (nstcalclr)");
 +    }
 +       
 +    /* GENERAL INTEGRATOR STUFF */
 +    if (!(ir->eI == eiMD || EI_VV(ir->eI)))
 +    {
 +        ir->etc = etcNO;
 +    }
 +    if (ir->eI == eiVVAK) {
 +        sprintf(warn_buf,"Integrator method %s is implemented primarily for validation purposes; for molecular dynamics, you should probably be using %s or %s",ei_names[eiVVAK],ei_names[eiMD],ei_names[eiVV]);
 +        warning_note(wi,warn_buf);
 +    }
 +    if (!EI_DYNAMICS(ir->eI))
 +    {
 +        ir->epc = epcNO;
 +    }
 +    if (EI_DYNAMICS(ir->eI))
 +    {
 +        if (ir->nstcalcenergy < 0)
 +        {
 +            ir->nstcalcenergy = ir_optimal_nstcalcenergy(ir);
 +            if (ir->nstenergy != 0 && ir->nstenergy < ir->nstcalcenergy)
 +            {
 +                /* nstcalcenergy larger than nstenergy does not make sense.
 +                 * We ideally want nstcalcenergy=nstenergy.
 +                 */
 +                if (ir->nstlist > 0)
 +                {
 +                    ir->nstcalcenergy = lcd(ir->nstenergy,ir->nstlist);
 +                }
 +                else
 +                {
 +                    ir->nstcalcenergy = ir->nstenergy;
 +                }
 +            }
 +        }
 +        else if (ir->nstenergy > 0 && ir->nstcalcenergy > ir->nstenergy)
 +        {
 +            /* If the user sets nstenergy small, we should respect that */
 +            sprintf(warn_buf,"Setting nstcalcenergy (%d) equal to nstenergy (%d)",ir->nstcalcenergy,ir->nstenergy);
 +            ir->nstcalcenergy = ir->nstenergy;
 +        }
 +
 +        if (ir->epc != epcNO)
 +        {
 +            if (ir->nstpcouple < 0)
 +            {
 +                ir->nstpcouple = ir_optimal_nstpcouple(ir);
 +            }
 +        }
 +        if (IR_TWINRANGE(*ir))
 +        {
 +            check_nst("nstlist",ir->nstlist,
 +                      "nstcalcenergy",&ir->nstcalcenergy,wi);
 +            if (ir->epc != epcNO)
 +            {
 +                check_nst("nstlist",ir->nstlist,
 +                          "nstpcouple",&ir->nstpcouple,wi); 
 +            }
 +        }
 +
 +        if (ir->nstcalcenergy > 1)
 +        {
 +            /* for storing exact averages nstenergy should be
 +             * a multiple of nstcalcenergy
 +             */
 +            check_nst("nstcalcenergy",ir->nstcalcenergy,
 +                      "nstenergy",&ir->nstenergy,wi);
 +            if (ir->efep != efepNO)
 +            {
 +                /* nstdhdl should be a multiple of nstcalcenergy */
 +                check_nst("nstcalcenergy",ir->nstcalcenergy,
 +                          "nstdhdl",&ir->fepvals->nstdhdl,wi);
 +                /* nstexpanded should be a multiple of nstcalcenergy */
 +                check_nst("nstcalcenergy",ir->nstcalcenergy,
 +                          "nstdhdl",&ir->expandedvals->nstexpanded,wi);
 +            }
 +        }
 +    }
 +
 +  /* LD STUFF */
 +  if ((EI_SD(ir->eI) || ir->eI == eiBD) &&
 +      ir->bContinuation && ir->ld_seed != -1) {
 +      warning_note(wi,"You are doing a continuation with SD or BD, make sure that ld_seed is different from the previous run (using ld_seed=-1 will ensure this)");
 +  }
 +
 +  /* TPI STUFF */
 +  if (EI_TPI(ir->eI)) {
 +    sprintf(err_buf,"TPI only works with pbc = %s",epbc_names[epbcXYZ]);
 +    CHECK(ir->ePBC != epbcXYZ);
 +    sprintf(err_buf,"TPI only works with ns = %s",ens_names[ensGRID]);
 +    CHECK(ir->ns_type != ensGRID);
 +    sprintf(err_buf,"with TPI nstlist should be larger than zero");
 +    CHECK(ir->nstlist <= 0);
 +    sprintf(err_buf,"TPI does not work with full electrostatics other than PME");
 +    CHECK(EEL_FULL(ir->coulombtype) && !EEL_PME(ir->coulombtype));
 +  }
 +
 +  /* SHAKE / LINCS */
 +  if ( (opts->nshake > 0) && (opts->bMorse) ) {
 +      sprintf(warn_buf,
 +              "Using morse bond-potentials while constraining bonds is useless");
 +      warning(wi,warn_buf);
 +  }
 +
 +  if ((EI_SD(ir->eI) || ir->eI == eiBD) &&
 +      ir->bContinuation && ir->ld_seed != -1) {
 +      warning_note(wi,"You are doing a continuation with SD or BD, make sure that ld_seed is different from the previous run (using ld_seed=-1 will ensure this)");
 +  }
 +  /* verify simulated tempering options */
 +
 +  if (ir->bSimTemp) {
 +      gmx_bool bAllTempZero = TRUE;
 +      for (i=0;i<fep->n_lambda;i++)
 +      {
 +          sprintf(err_buf,"Entry %d for %s must be between 0 and 1, instead is %g",i,efpt_names[efptTEMPERATURE],fep->all_lambda[efptTEMPERATURE][i]);
 +          CHECK((fep->all_lambda[efptTEMPERATURE][i] < 0) || (fep->all_lambda[efptTEMPERATURE][i] > 1));
 +          if (fep->all_lambda[efptTEMPERATURE][i] > 0)
 +          {
 +              bAllTempZero = FALSE;
 +          }
 +      }
 +      sprintf(err_buf,"if simulated tempering is on, temperature-lambdas may not be all zero");
 +      CHECK(bAllTempZero==TRUE);
 +
 +      sprintf(err_buf,"Simulated tempering is currently only compatible with md-vv");
 +      CHECK(ir->eI != eiVV);
 +
 +      /* check compatibility of the temperature coupling with simulated tempering */
 +
 +      if (ir->etc == etcNOSEHOOVER) {
 +          sprintf(warn_buf,"Nose-Hoover based temperature control such as [%s] my not be entirelyconsistent with simulated tempering",etcoupl_names[ir->etc]);
 +          warning_note(wi,warn_buf);
 +      }
 +
 +      /* check that the temperatures make sense */
 +
 +      sprintf(err_buf,"Higher simulated tempering temperature (%g) must be >= than the simulated tempering lower temperature (%g)",ir->simtempvals->simtemp_high,ir->simtempvals->simtemp_low);
 +      CHECK(ir->simtempvals->simtemp_high <= ir->simtempvals->simtemp_low);
 +
 +      sprintf(err_buf,"Higher simulated tempering temperature (%g) must be >= zero",ir->simtempvals->simtemp_high);
 +      CHECK(ir->simtempvals->simtemp_high <= 0);
 +
 +      sprintf(err_buf,"Lower simulated tempering temperature (%g) must be >= zero",ir->simtempvals->simtemp_low);
 +      CHECK(ir->simtempvals->simtemp_low <= 0);
 +  }
 +
 +  /* verify free energy options */
 +
 +  if (ir->efep != efepNO) {
 +      fep = ir->fepvals;
 +      sprintf(err_buf,"The soft-core power is %d and can only be 1 or 2",
 +              fep->sc_power);
 +      CHECK(fep->sc_alpha!=0 && fep->sc_power!=1 && fep->sc_power!=2);
 +
 +      sprintf(err_buf,"The soft-core sc-r-power is %d and can only be 6 or 48",
 +              (int)fep->sc_r_power);
 +      CHECK(fep->sc_alpha!=0 && fep->sc_r_power!=6.0 && fep->sc_r_power!=48.0);
 +
 +      /* check validity of options */
 +      if (fep->n_lambda > 0 && ir->rlist < max(ir->rvdw,ir->rcoulomb))
 +      {
 +          sprintf(warn_buf,
 +                  "For foreign lambda free energy differences it is assumed that the soft-core interactions have no effect beyond the neighborlist cut-off");
 +          warning(wi,warn_buf);
 +      }
 +
 +      sprintf(err_buf,"Can't use postive delta-lambda (%g) if initial state/lambda does not start at zero",fep->delta_lambda);
 +      CHECK(fep->delta_lambda > 0 && ((fep->init_fep_state !=0) ||  (fep->init_lambda !=0)));
 +
 +      sprintf(err_buf,"Can't use postive delta-lambda (%g) with expanded ensemble simulations",fep->delta_lambda);
 +      CHECK(fep->delta_lambda > 0 && (ir->efep == efepEXPANDED));
 +
 +      sprintf(err_buf,"Free-energy not implemented for Ewald");
 +      CHECK(ir->coulombtype==eelEWALD);
 +
 +      /* check validity of lambda inputs */
 +      sprintf(err_buf,"initial thermodynamic state %d does not exist, only goes to %d",fep->init_fep_state,fep->n_lambda);
 +      CHECK((fep->init_fep_state > fep->n_lambda));
 +
 +      for (j=0;j<efptNR;j++)
 +      {
 +          for (i=0;i<fep->n_lambda;i++)
 +          {
 +              sprintf(err_buf,"Entry %d for %s must be between 0 and 1, instead is %g",i,efpt_names[j],fep->all_lambda[j][i]);
 +              CHECK((fep->all_lambda[j][i] < 0) || (fep->all_lambda[j][i] > 1));
 +          }
 +      }
 +
 +      if ((fep->sc_alpha>0) && (!fep->bScCoul))
 +      {
 +          for (i=0;i<fep->n_lambda;i++)
 +          {
 +              sprintf(err_buf,"For state %d, vdw-lambdas (%f) is changing with vdw softcore, while coul-lambdas (%f) is nonzero without coulomb softcore: this will lead to crashes, and is not supported.",i,fep->all_lambda[efptVDW][i],
 +                      fep->all_lambda[efptCOUL][i]);
 +              CHECK((fep->sc_alpha>0) &&
 +                    (((fep->all_lambda[efptCOUL][i] > 0.0) &&
 +                      (fep->all_lambda[efptCOUL][i] < 1.0)) &&
 +                     ((fep->all_lambda[efptVDW][i] > 0.0) &&
 +                      (fep->all_lambda[efptVDW][i] < 1.0))));
 +          }
 +      }
 +
 +      if ((fep->bScCoul) && (EEL_PME(ir->coulombtype)))
 +      {
 +          sprintf(warn_buf,"With coulomb soft core, the reciprocal space calculation will not necessarily cancel.  It may be necessary to decrease the reciprocal space energy, and increase the cutoff radius to get sufficiently close matches to energies with free energy turned off.");
 +          warning(wi, warn_buf);
 +      }
 +
 +      /*  Free Energy Checks -- In an ideal world, slow growth and FEP would
 +          be treated differently, but that's the next step */
 +
 +      for (i=0;i<efptNR;i++) {
 +          for (j=0;j<fep->n_lambda;j++) {
 +              sprintf(err_buf,"%s[%d] must be between 0 and 1",efpt_names[i],j);
 +              CHECK((fep->all_lambda[i][j] < 0) || (fep->all_lambda[i][j] > 1));
 +          }
 +      }
 +  }
 +
 +  if ((ir->bSimTemp) || (ir->efep == efepEXPANDED)) {
 +      fep = ir->fepvals;
 +      expand = ir->expandedvals;
 +
 +      /* checking equilibration of weights inputs for validity */
 +
 +      sprintf(err_buf,"weight-equil-number-all-lambda (%d) is ignored if lmc-weights-equil is not equal to %s",
 +              expand->equil_n_at_lam,elmceq_names[elmceqNUMATLAM]);
 +      CHECK((expand->equil_n_at_lam>0) && (expand->elmceq!=elmceqNUMATLAM));
 +
 +      sprintf(err_buf,"weight-equil-number-samples (%d) is ignored if lmc-weights-equil is not equal to %s",
 +              expand->equil_samples,elmceq_names[elmceqSAMPLES]);
 +      CHECK((expand->equil_samples>0) && (expand->elmceq!=elmceqSAMPLES));
 +
 +      sprintf(err_buf,"weight-equil-number-steps (%d) is ignored if lmc-weights-equil is not equal to %s",
 +              expand->equil_steps,elmceq_names[elmceqSTEPS]);
 +      CHECK((expand->equil_steps>0) && (expand->elmceq!=elmceqSTEPS));
 +
 +      sprintf(err_buf,"weight-equil-wl-delta (%d) is ignored if lmc-weights-equil is not equal to %s",
 +              expand->equil_samples,elmceq_names[elmceqWLDELTA]);
 +      CHECK((expand->equil_wl_delta>0) && (expand->elmceq!=elmceqWLDELTA));
 +
 +      sprintf(err_buf,"weight-equil-count-ratio (%f) is ignored if lmc-weights-equil is not equal to %s",
 +              expand->equil_ratio,elmceq_names[elmceqRATIO]);
 +      CHECK((expand->equil_ratio>0) && (expand->elmceq!=elmceqRATIO));
 +
 +      sprintf(err_buf,"weight-equil-number-all-lambda (%d) must be a positive integer if lmc-weights-equil=%s",
 +              expand->equil_n_at_lam,elmceq_names[elmceqNUMATLAM]);
 +      CHECK((expand->equil_n_at_lam<=0) && (expand->elmceq==elmceqNUMATLAM));
 +
 +      sprintf(err_buf,"weight-equil-number-samples (%d) must be a positive integer if lmc-weights-equil=%s",
 +              expand->equil_samples,elmceq_names[elmceqSAMPLES]);
 +      CHECK((expand->equil_samples<=0) && (expand->elmceq==elmceqSAMPLES));
 +
 +      sprintf(err_buf,"weight-equil-number-steps (%d) must be a positive integer if lmc-weights-equil=%s",
 +              expand->equil_steps,elmceq_names[elmceqSTEPS]);
 +      CHECK((expand->equil_steps<=0) && (expand->elmceq==elmceqSTEPS));
 +
 +      sprintf(err_buf,"weight-equil-wl-delta (%f) must be > 0 if lmc-weights-equil=%s",
 +              expand->equil_wl_delta,elmceq_names[elmceqWLDELTA]);
 +      CHECK((expand->equil_wl_delta<=0) && (expand->elmceq==elmceqWLDELTA));
 +
 +      sprintf(err_buf,"weight-equil-count-ratio (%f) must be > 0 if lmc-weights-equil=%s",
 +              expand->equil_ratio,elmceq_names[elmceqRATIO]);
 +      CHECK((expand->equil_ratio<=0) && (expand->elmceq==elmceqRATIO));
 +
 +      sprintf(err_buf,"lmc-weights-equil=%s only possible when lmc-stats = %s or lmc-stats %s",
 +              elmceq_names[elmceqWLDELTA],elamstats_names[elamstatsWL],elamstats_names[elamstatsWWL]);
 +      CHECK((expand->elmceq==elmceqWLDELTA) && (!EWL(expand->elamstats)));
 +
 +      sprintf(err_buf,"lmc-repeats (%d) must be greater than 0",expand->lmc_repeats);
 +      CHECK((expand->lmc_repeats <= 0));
 +      sprintf(err_buf,"minimum-var-min (%d) must be greater than 0",expand->minvarmin);
 +      CHECK((expand->minvarmin <= 0));
 +      sprintf(err_buf,"weight-c-range (%d) must be greater or equal to 0",expand->c_range);
 +      CHECK((expand->c_range < 0));
 +      sprintf(err_buf,"init-lambda-state (%d) must be zero if lmc-forced-nstart (%d)> 0 and lmc-move != 'no'",
 +              fep->init_fep_state, expand->lmc_forced_nstart);
 +      CHECK((fep->init_fep_state!=0) && (expand->lmc_forced_nstart>0) && (expand->elmcmove!=elmcmoveNO));
 +      sprintf(err_buf,"lmc-forced-nstart (%d) must not be negative",expand->lmc_forced_nstart);
 +      CHECK((expand->lmc_forced_nstart < 0));
 +      sprintf(err_buf,"init-lambda-state (%d) must be in the interval [0,number of lambdas)",fep->init_fep_state);
 +      CHECK((fep->init_fep_state < 0) || (fep->init_fep_state >= fep->n_lambda));
 +
 +      sprintf(err_buf,"init-wl-delta (%f) must be greater than or equal to 0",expand->init_wl_delta);
 +      CHECK((expand->init_wl_delta < 0));
 +      sprintf(err_buf,"wl-ratio (%f) must be between 0 and 1",expand->wl_ratio);
 +      CHECK((expand->wl_ratio <= 0) || (expand->wl_ratio >= 1));
 +      sprintf(err_buf,"wl-scale (%f) must be between 0 and 1",expand->wl_scale);
 +      CHECK((expand->wl_scale <= 0) || (expand->wl_scale >= 1));
 +
 +      /* if there is no temperature control, we need to specify an MC temperature */
 +      sprintf(err_buf,"If there is no temperature control, and lmc-mcmove!= 'no',mc_temperature must be set to a positive number");
 +      if (expand->nstTij > 0)
 +      {
 +          sprintf(err_buf,"nst-transition-matrix (%d) must be an integer multiple of nstlog (%d)",
 +                  expand->nstTij,ir->nstlog);
 +          CHECK((mod(expand->nstTij,ir->nstlog)!=0));
 +      }
 +  }
 +
 +  /* PBC/WALLS */
 +  sprintf(err_buf,"walls only work with pbc=%s",epbc_names[epbcXY]);
 +  CHECK(ir->nwall && ir->ePBC!=epbcXY);
 +
 +  /* VACUUM STUFF */
 +  if (ir->ePBC != epbcXYZ && ir->nwall != 2) {
 +    if (ir->ePBC == epbcNONE) {
 +      if (ir->epc != epcNO) {
 +          warning(wi,"Turning off pressure coupling for vacuum system");
 +          ir->epc = epcNO;
 +      }
 +    } else {
 +      sprintf(err_buf,"Can not have pressure coupling with pbc=%s",
 +            epbc_names[ir->ePBC]);
 +      CHECK(ir->epc != epcNO);
 +    }
 +    sprintf(err_buf,"Can not have Ewald with pbc=%s",epbc_names[ir->ePBC]);
 +    CHECK(EEL_FULL(ir->coulombtype));
 +
 +    sprintf(err_buf,"Can not have dispersion correction with pbc=%s",
 +          epbc_names[ir->ePBC]);
 +    CHECK(ir->eDispCorr != edispcNO);
 +  }
 +
 +  if (ir->rlist == 0.0) {
 +    sprintf(err_buf,"can only have neighborlist cut-off zero (=infinite)\n"
 +          "with coulombtype = %s or coulombtype = %s\n"
 +          "without periodic boundary conditions (pbc = %s) and\n"
 +          "rcoulomb and rvdw set to zero",
 +          eel_names[eelCUT],eel_names[eelUSER],epbc_names[epbcNONE]);
 +    CHECK(((ir->coulombtype != eelCUT) && (ir->coulombtype != eelUSER)) ||
 +        (ir->ePBC     != epbcNONE) ||
 +        (ir->rcoulomb != 0.0)      || (ir->rvdw != 0.0));
 +
 +    if (ir->nstlist < 0) {
 +        warning_error(wi,"Can not have heuristic neighborlist updates without cut-off");
 +    }
 +    if (ir->nstlist > 0) {
 +        warning_note(wi,"Simulating without cut-offs is usually (slightly) faster with nstlist=0, nstype=simple and particle decomposition");
 +    }
 +  }
 +
 +  /* COMM STUFF */
 +  if (ir->nstcomm == 0) {
 +    ir->comm_mode = ecmNO;
 +  }
 +  if (ir->comm_mode != ecmNO) {
 +    if (ir->nstcomm < 0) {
 +        warning(wi,"If you want to remove the rotation around the center of mass, you should set comm_mode = Angular instead of setting nstcomm < 0. nstcomm is modified to its absolute value");
 +      ir->nstcomm = abs(ir->nstcomm);
 +    }
 +
 +    if (ir->nstcalcenergy > 0 && ir->nstcomm < ir->nstcalcenergy) {
 +        warning_note(wi,"nstcomm < nstcalcenergy defeats the purpose of nstcalcenergy, setting nstcomm to nstcalcenergy");
 +        ir->nstcomm = ir->nstcalcenergy;
 +    }
 +
 +    if (ir->comm_mode == ecmANGULAR) {
 +      sprintf(err_buf,"Can not remove the rotation around the center of mass with periodic molecules");
 +      CHECK(ir->bPeriodicMols);
 +      if (ir->ePBC != epbcNONE)
 +          warning(wi,"Removing the rotation around the center of mass in a periodic system (this is not a problem when you have only one molecule).");
 +    }
 +  }
 +
 +  if (EI_STATE_VELOCITY(ir->eI) && ir->ePBC == epbcNONE && ir->comm_mode != ecmANGULAR) {
 +      warning_note(wi,"Tumbling and or flying ice-cubes: We are not removing rotation around center of mass in a non-periodic system. You should probably set comm_mode = ANGULAR.");
 +  }
 +  
 +  sprintf(err_buf,"Twin-range neighbour searching (NS) with simple NS"
 +        " algorithm not implemented");
 +  CHECK(((ir->rcoulomb > ir->rlist) || (ir->rvdw > ir->rlist))
 +      && (ir->ns_type == ensSIMPLE));
 +
 +  /* TEMPERATURE COUPLING */
 +  if (ir->etc == etcYES)
 +    {
 +        ir->etc = etcBERENDSEN;
 +        warning_note(wi,"Old option for temperature coupling given: "
 +                     "changing \"yes\" to \"Berendsen\"\n");
 +    }
 +
 +    if ((ir->etc == etcNOSEHOOVER) || (ir->epc == epcMTTK))
 +    {
 +        if (ir->opts.nhchainlength < 1)
 +        {
 +            sprintf(warn_buf,"number of Nose-Hoover chains (currently %d) cannot be less than 1,reset to 1\n",ir->opts.nhchainlength);
 +            ir->opts.nhchainlength =1;
 +            warning(wi,warn_buf);
 +        }
 +        
 +        if (ir->etc==etcNOSEHOOVER && !EI_VV(ir->eI) && ir->opts.nhchainlength > 1)
 +        {
 +            warning_note(wi,"leapfrog does not yet support Nose-Hoover chains, nhchainlength reset to 1");
 +            ir->opts.nhchainlength = 1;
 +        }
 +    }
 +    else
 +    {
 +        ir->opts.nhchainlength = 0;
 +    }
 +
 +    if (ir->eI == eiVVAK) {
 +        sprintf(err_buf,"%s implemented primarily for validation, and requires nsttcouple = 1 and nstpcouple = 1.",
 +                ei_names[eiVVAK]);
 +        CHECK((ir->nsttcouple != 1) || (ir->nstpcouple != 1));
 +    }
 +
 +    if (ETC_ANDERSEN(ir->etc))
 +    {
 +        sprintf(err_buf,"%s temperature control not supported for integrator %s.",etcoupl_names[ir->etc],ei_names[ir->eI]);
 +        CHECK(!(EI_VV(ir->eI)));
 +
 +        for (i=0;i<ir->opts.ngtc;i++)
 +        {
 +            sprintf(err_buf,"all tau_t must currently be equal using Andersen temperature control, violated for group %d",i);
 +            CHECK(ir->opts.tau_t[0] != ir->opts.tau_t[i]);
 +            sprintf(err_buf,"all tau_t must be postive using Andersen temperature control, tau_t[%d]=%10.6f",
 +                    i,ir->opts.tau_t[i]);
 +            CHECK(ir->opts.tau_t[i]<0);
 +        }
 +        if (ir->nstcomm > 0 && (ir->etc == etcANDERSEN)) {
 +            sprintf(warn_buf,"Center of mass removal not necessary for %s.  All velocities of coupled groups are rerandomized periodically, so flying ice cube errors will not occur.",etcoupl_names[ir->etc]);
 +            warning_note(wi,warn_buf);
 +        }
 +
 +        sprintf(err_buf,"nstcomm must be 1, not %d for %s, as velocities of atoms in coupled groups are randomized every time step",ir->nstcomm,etcoupl_names[ir->etc]);
 +        CHECK(ir->nstcomm > 1 && (ir->etc == etcANDERSEN));
 +
 +        for (i=0;i<ir->opts.ngtc;i++)
 +        {
 +            int nsteps = (int)(ir->opts.tau_t[i]/ir->delta_t);
 +            sprintf(err_buf,"tau_t/delta_t for group %d for temperature control method %s must be a multiple of nstcomm (%d), as velocities of atoms in coupled groups are randomized every time step. The input tau_t (%8.3f) leads to %d steps per randomization",i,etcoupl_names[ir->etc],ir->nstcomm,ir->opts.tau_t[i],nsteps);
 +            CHECK((nsteps % ir->nstcomm) && (ir->etc == etcANDERSENMASSIVE));
 +        }
 +    }
 +    if (ir->etc == etcBERENDSEN)
 +    {
 +        sprintf(warn_buf,"The %s thermostat does not generate the correct kinetic energy distribution. You might want to consider using the %s thermostat.",
 +                ETCOUPLTYPE(ir->etc),ETCOUPLTYPE(etcVRESCALE));
 +        warning_note(wi,warn_buf);
 +    }
 +
 +    if ((ir->etc==etcNOSEHOOVER || ETC_ANDERSEN(ir->etc))
 +        && ir->epc==epcBERENDSEN)
 +    {
 +        sprintf(warn_buf,"Using Berendsen pressure coupling invalidates the "
 +                "true ensemble for the thermostat");
 +        warning(wi,warn_buf);
 +    }
 +
 +    /* PRESSURE COUPLING */
 +    if (ir->epc == epcISOTROPIC)
 +    {
 +        ir->epc = epcBERENDSEN;
 +        warning_note(wi,"Old option for pressure coupling given: "
 +                     "changing \"Isotropic\" to \"Berendsen\"\n"); 
 +    }
 +
 +    if (ir->epc != epcNO)
 +    {
 +        dt_pcoupl = ir->nstpcouple*ir->delta_t;
 +
 +        sprintf(err_buf,"tau-p must be > 0 instead of %g\n",ir->tau_p);
 +        CHECK(ir->tau_p <= 0);
 +
 +        if (ir->tau_p/dt_pcoupl < pcouple_min_integration_steps(ir->epc))
 +        {
 +            sprintf(warn_buf,"For proper integration of the %s barostat, tau-p (%g) should be at least %d times larger than nstpcouple*dt (%g)",
 +                    EPCOUPLTYPE(ir->epc),ir->tau_p,pcouple_min_integration_steps(ir->epc),dt_pcoupl);
 +            warning(wi,warn_buf);
 +        }
 +
 +        sprintf(err_buf,"compressibility must be > 0 when using pressure"
 +                " coupling %s\n",EPCOUPLTYPE(ir->epc));
 +        CHECK(ir->compress[XX][XX] < 0 || ir->compress[YY][YY] < 0 ||
 +              ir->compress[ZZ][ZZ] < 0 ||
 +              (trace(ir->compress) == 0 && ir->compress[YY][XX] <= 0 &&
 +               ir->compress[ZZ][XX] <= 0 && ir->compress[ZZ][YY] <= 0));
 +        
 +        if (epcPARRINELLORAHMAN == ir->epc && opts->bGenVel)
 +        {
 +            sprintf(warn_buf,
 +                    "You are generating velocities so I am assuming you "
 +                    "are equilibrating a system. You are using "
 +                    "%s pressure coupling, but this can be "
 +                    "unstable for equilibration. If your system crashes, try "
 +                    "equilibrating first with Berendsen pressure coupling. If "
 +                    "you are not equilibrating the system, you can probably "
 +                    "ignore this warning.",
 +                    epcoupl_names[ir->epc]);
 +            warning(wi,warn_buf);
 +        }
 +    }
 +
 +    if (EI_VV(ir->eI))
 +    {
 +        if (ir->epc > epcNO)
 +        {
 +            if ((ir->epc!=epcBERENDSEN) && (ir->epc!=epcMTTK))
 +            {
 +                warning_error(wi,"for md-vv and md-vv-avek, can only use Berendsen and Martyna-Tuckerman-Tobias-Klein (MTTK) equations for pressure control; MTTK is equivalent to Parrinello-Rahman.");
 +            }
 +        }
 +    }
 +
 +  /* ELECTROSTATICS */
 +  /* More checks are in triple check (grompp.c) */
 +
 +  if (ir->coulombtype == eelSWITCH) {
 +    sprintf(warn_buf,"coulombtype = %s is only for testing purposes and can lead to serious "
 +            "artifacts, advice: use coulombtype = %s",
 +          eel_names[ir->coulombtype],
 +          eel_names[eelRF_ZERO]);
 +    warning(wi,warn_buf);
 +  }
 +
 +  if (ir->epsilon_r!=1 && ir->implicit_solvent==eisGBSA) {
 +    sprintf(warn_buf,"epsilon-r = %g with GB implicit solvent, will use this value for inner dielectric",ir->epsilon_r);
 +    warning_note(wi,warn_buf);
 +  }
 +
 +  if (EEL_RF(ir->coulombtype) && ir->epsilon_rf==1 && ir->epsilon_r!=1) {
 +    sprintf(warn_buf,"epsilon-r = %g and epsilon-rf = 1 with reaction field, proceeding assuming old format and exchanging epsilon-r and epsilon-rf",ir->epsilon_r);
 +    warning(wi,warn_buf);
 +    ir->epsilon_rf = ir->epsilon_r;
 +    ir->epsilon_r  = 1.0;
 +  }
 +
 +  if (getenv("GALACTIC_DYNAMICS") == NULL) {  
 +    sprintf(err_buf,"epsilon-r must be >= 0 instead of %g\n",ir->epsilon_r);
 +    CHECK(ir->epsilon_r < 0);
 +  }
 +  
 +  if (EEL_RF(ir->coulombtype)) {
 +    /* reaction field (at the cut-off) */
 +    
 +    if (ir->coulombtype == eelRF_ZERO) {
 +       sprintf(warn_buf,"With coulombtype = %s, epsilon-rf must be 0, assuming you meant epsilon_rf=0",
 +             eel_names[ir->coulombtype]);
 +        CHECK(ir->epsilon_rf != 0);
 +        ir->epsilon_rf = 0.0;
 +    }
 +
 +    sprintf(err_buf,"epsilon-rf must be >= epsilon-r");
 +    CHECK((ir->epsilon_rf < ir->epsilon_r && ir->epsilon_rf != 0) ||
 +        (ir->epsilon_r == 0));
 +    if (ir->epsilon_rf == ir->epsilon_r) {
 +      sprintf(warn_buf,"Using epsilon-rf = epsilon-r with %s does not make sense",
 +            eel_names[ir->coulombtype]);
 +      warning(wi,warn_buf);
 +    }
 +  }
 +  /* Allow rlist>rcoulomb for tabulated long range stuff. This just
 +   * means the interaction is zero outside rcoulomb, but it helps to
 +   * provide accurate energy conservation.
 +   */
 +  if (EEL_MIGHT_BE_ZERO_AT_CUTOFF(ir->coulombtype)) {
 +    if (EEL_SWITCHED(ir->coulombtype)) {
 +      sprintf(err_buf,
 +            "With coulombtype = %s rcoulomb_switch must be < rcoulomb. Or, better: Use the potential modifier options!",
 +            eel_names[ir->coulombtype]);
 +      CHECK(ir->rcoulomb_switch >= ir->rcoulomb);
 +    }
 +  } else if (ir->coulombtype == eelCUT || EEL_RF(ir->coulombtype)) {
 +      if (ir->cutoff_scheme == ecutsGROUP && ir->coulomb_modifier == eintmodNONE) {
 +          sprintf(err_buf,"With coulombtype = %s, rcoulomb should be >= rlist unless you use a potential modifier",
 +                  eel_names[ir->coulombtype]);
 +          CHECK(ir->rlist > ir->rcoulomb);
 +      }
 +  }
 +
 +  if(ir->coulombtype==eelSWITCH || ir->coulombtype==eelSHIFT ||
 +     ir->vdwtype==evdwSWITCH || ir->vdwtype==evdwSHIFT)
 +  {
 +      sprintf(warn_buf,
 +              "The switch/shift interaction settings are just for compatibility; you will get better"
 +              "performance from applying potential modifiers to your interactions!\n");
 +      warning_note(wi,warn_buf);
 +  }
 +
 +  if (EEL_FULL(ir->coulombtype))
 +  {
 +      if (ir->coulombtype==eelPMESWITCH || ir->coulombtype==eelPMEUSER ||
 +          ir->coulombtype==eelPMEUSERSWITCH)
 +      {
 +          sprintf(err_buf,"With coulombtype = %s, rcoulomb must be <= rlist",
 +                  eel_names[ir->coulombtype]);
 +          CHECK(ir->rcoulomb > ir->rlist);
 +      }
 +      else if (ir->cutoff_scheme == ecutsGROUP && ir->coulomb_modifier == eintmodNONE)
 +      {
 +          if (ir->coulombtype == eelPME || ir->coulombtype == eelP3M_AD)
 +          {
 +              sprintf(err_buf,
 +                      "With coulombtype = %s (without modifier), rcoulomb must be equal to rlist,\n"
 +                      "or rlistlong if nstcalclr=1. For optimal energy conservation,consider using\n"
 +                      "a potential modifier.",eel_names[ir->coulombtype]);
 +              if(ir->nstcalclr==1)
 +              {
 +                  CHECK(ir->rcoulomb != ir->rlist && ir->rcoulomb != ir->rlistlong);
 +              }
 +              else
 +              {
 +                  CHECK(ir->rcoulomb != ir->rlist);
 +              }
 +          }
 +      }
 +  }
 +
 +  if (EEL_PME(ir->coulombtype)) {
 +    if (ir->pme_order < 3) {
 +        warning_error(wi,"pme-order can not be smaller than 3");
 +    }
 +  }
 +
 +  if (ir->nwall==2 && EEL_FULL(ir->coulombtype)) {
 +    if (ir->ewald_geometry == eewg3D) {
 +      sprintf(warn_buf,"With pbc=%s you should use ewald-geometry=%s",
 +            epbc_names[ir->ePBC],eewg_names[eewg3DC]);
 +      warning(wi,warn_buf);
 +    }
 +    /* This check avoids extra pbc coding for exclusion corrections */
 +    sprintf(err_buf,"wall-ewald-zfac should be >= 2");
 +    CHECK(ir->wall_ewald_zfac < 2);
 +  }
 +
 +  if (EVDW_SWITCHED(ir->vdwtype)) {
 +    sprintf(err_buf,"With vdwtype = %s rvdw-switch must be < rvdw. Or, better - use a potential modifier.",
 +          evdw_names[ir->vdwtype]);
 +    CHECK(ir->rvdw_switch >= ir->rvdw);
 +  } else if (ir->vdwtype == evdwCUT) {
 +      if (ir->cutoff_scheme == ecutsGROUP && ir->vdw_modifier == eintmodNONE) {
 +          sprintf(err_buf,"With vdwtype = %s, rvdw must be >= rlist unless you use a potential modifier",evdw_names[ir->vdwtype]);
 +          CHECK(ir->rlist > ir->rvdw);
 +      }
 +  }
 +    if (ir->cutoff_scheme == ecutsGROUP)
 +    {
 +        if (EEL_IS_ZERO_AT_CUTOFF(ir->coulombtype)
 +            && (ir->rlistlong <= ir->rcoulomb))
 +        {
 +            sprintf(warn_buf,"For energy conservation with switch/shift potentials, %s should be 0.1 to 0.3 nm larger than rcoulomb.",
 +                    IR_TWINRANGE(*ir) ? "rlistlong" : "rlist");
 +            warning_note(wi,warn_buf);
 +        }
 +        if (EVDW_SWITCHED(ir->vdwtype) && (ir->rlistlong <= ir->rvdw))
 +        {
 +            sprintf(warn_buf,"For energy conservation with switch/shift potentials, %s should be 0.1 to 0.3 nm larger than rvdw.",
 +                    IR_TWINRANGE(*ir) ? "rlistlong" : "rlist");
 +            warning_note(wi,warn_buf);
 +        }
 +    }
 +
 +  if (ir->vdwtype == evdwUSER && ir->eDispCorr != edispcNO) {
 +      warning_note(wi,"You have selected user tables with dispersion correction, the dispersion will be corrected to -C6/r^6 beyond rvdw_switch (the tabulated interaction between rvdw_switch and rvdw will not be double counted). Make sure that you really want dispersion correction to -C6/r^6.");
 +  }
 +
 +  if (ir->nstlist == -1) {
 +    sprintf(err_buf,"With nstlist=-1 rvdw and rcoulomb should be smaller than rlist to account for diffusion and possibly charge-group radii");
 +    CHECK(ir->rvdw >= ir->rlist || ir->rcoulomb >= ir->rlist);
 +  }
 +  sprintf(err_buf,"nstlist can not be smaller than -1");
 +  CHECK(ir->nstlist < -1);
 +
 +  if (ir->eI == eiLBFGS && (ir->coulombtype==eelCUT || ir->vdwtype==evdwCUT)
 +     && ir->rvdw != 0) {
 +    warning(wi,"For efficient BFGS minimization, use switch/shift/pme instead of cut-off.");
 +  }
 +
 +  if (ir->eI == eiLBFGS && ir->nbfgscorr <= 0) {
 +    warning(wi,"Using L-BFGS with nbfgscorr<=0 just gets you steepest descent.");
 +  }
 +
 +    /* ENERGY CONSERVATION */
 +    if (ir_NVE(ir) && ir->cutoff_scheme == ecutsGROUP)
 +    {
 +        if (!EVDW_MIGHT_BE_ZERO_AT_CUTOFF(ir->vdwtype) && ir->rvdw > 0 && ir->vdw_modifier == eintmodNONE)
 +        {
 +            sprintf(warn_buf,"You are using a cut-off for VdW interactions with NVE, for good energy conservation use vdwtype = %s (possibly with DispCorr)",
 +                    evdw_names[evdwSHIFT]);
 +            warning_note(wi,warn_buf);
 +        }
 +        if (!EEL_MIGHT_BE_ZERO_AT_CUTOFF(ir->coulombtype) && ir->rcoulomb > 0 && ir->coulomb_modifier == eintmodNONE)
 +        {
 +            sprintf(warn_buf,"You are using a cut-off for electrostatics with NVE, for good energy conservation use coulombtype = %s or %s",
 +                    eel_names[eelPMESWITCH],eel_names[eelRF_ZERO]);
 +            warning_note(wi,warn_buf);
 +        }
 +    }
 +
 +  /* IMPLICIT SOLVENT */
 +  if(ir->coulombtype==eelGB_NOTUSED)
 +  {
 +    ir->coulombtype=eelCUT;
 +    ir->implicit_solvent=eisGBSA;
 +    fprintf(stderr,"Note: Old option for generalized born electrostatics given:\n"
 +          "Changing coulombtype from \"generalized-born\" to \"cut-off\" and instead\n"
 +            "setting implicit-solvent value to \"GBSA\" in input section.\n");
 +  }
 +
 +  if(ir->sa_algorithm==esaSTILL)
 +  {
 +    sprintf(err_buf,"Still SA algorithm not available yet, use %s or %s instead\n",esa_names[esaAPPROX],esa_names[esaNO]);
 +    CHECK(ir->sa_algorithm == esaSTILL);
 +  }
 +  
 +  if(ir->implicit_solvent==eisGBSA)
 +  {
 +    sprintf(err_buf,"With GBSA implicit solvent, rgbradii must be equal to rlist.");
 +    CHECK(ir->rgbradii != ir->rlist);
 +        
 +    if(ir->coulombtype!=eelCUT)
 +        {
 +                sprintf(err_buf,"With GBSA, coulombtype must be equal to %s\n",eel_names[eelCUT]);
 +                CHECK(ir->coulombtype!=eelCUT);
 +        }
 +        if(ir->vdwtype!=evdwCUT)
 +        {
 +                sprintf(err_buf,"With GBSA, vdw-type must be equal to %s\n",evdw_names[evdwCUT]);
 +                CHECK(ir->vdwtype!=evdwCUT);
 +        }
 +    if(ir->nstgbradii<1)
 +    {
 +      sprintf(warn_buf,"Using GBSA with nstgbradii<1, setting nstgbradii=1");
 +      warning_note(wi,warn_buf);
 +      ir->nstgbradii=1;
 +    }
 +    if(ir->sa_algorithm==esaNO)
 +    {
 +      sprintf(warn_buf,"No SA (non-polar) calculation requested together with GB. Are you sure this is what you want?\n");
 +      warning_note(wi,warn_buf);
 +    }
 +    if(ir->sa_surface_tension<0 && ir->sa_algorithm!=esaNO)
 +    {
 +      sprintf(warn_buf,"Value of sa_surface_tension is < 0. Changing it to 2.05016 or 2.25936 kJ/nm^2/mol for Still and HCT/OBC respectively\n");
 +      warning_note(wi,warn_buf);
 +      
 +      if(ir->gb_algorithm==egbSTILL)
 +      {
 +        ir->sa_surface_tension = 0.0049 * CAL2JOULE * 100;
 +      }
 +      else
 +      {
 +        ir->sa_surface_tension = 0.0054 * CAL2JOULE * 100;
 +      }
 +    }
 +    if(ir->sa_surface_tension==0 && ir->sa_algorithm!=esaNO)
 +    {
 +      sprintf(err_buf, "Surface tension set to 0 while SA-calculation requested\n");
 +      CHECK(ir->sa_surface_tension==0 && ir->sa_algorithm!=esaNO);
 +    }
 +    
 +  }
 +
 +    if (ir->bAdress)
 +    {
 +        if (ir->cutoff_scheme != ecutsGROUP)
 +        {
 +            warning_error(wi,"AdresS simulation supports only cutoff-scheme=group");
 +        }
 +        if (!EI_SD(ir->eI))
 +        {
 +            warning_error(wi,"AdresS simulation supports only stochastic dynamics");
 +        }
 +        if (ir->epc != epcNO)
 +        {
 +            warning_error(wi,"AdresS simulation does not support pressure coupling");
 +        }
 +        if (EEL_FULL(ir->coulombtype))
 +        {
 +            warning_error(wi,"AdresS simulation does not support long-range electrostatics");
 +        }
 +    }
 +}
 +
 +/* count the number of text elemets separated by whitespace in a string.
 +    str = the input string
 +    maxptr = the maximum number of allowed elements
 +    ptr = the output array of pointers to the first character of each element
 +    returns: the number of elements. */
 +int str_nelem(const char *str,int maxptr,char *ptr[])
 +{
 +  int  np=0;
 +  char *copy0,*copy;
 +  
 +  copy0=strdup(str); 
 +  copy=copy0;
 +  ltrim(copy);
 +  while (*copy != '\0') {
 +    if (np >= maxptr)
 +      gmx_fatal(FARGS,"Too many groups on line: '%s' (max is %d)",
 +                str,maxptr);
 +    if (ptr) 
 +      ptr[np]=copy;
 +    np++;
 +    while ((*copy != '\0') && !isspace(*copy))
 +      copy++;
 +    if (*copy != '\0') {
 +      *copy='\0';
 +      copy++;
 +    }
 +    ltrim(copy);
 +  }
 +  if (ptr == NULL)
 +    sfree(copy0);
 +
 +  return np;
 +}
 +
 +/* interpret a number of doubles from a string and put them in an array,
 +   after allocating space for them.
 +   str = the input string
 +   n = the (pre-allocated) number of doubles read
 +   r = the output array of doubles. */
 +static void parse_n_real(char *str,int *n,real **r)
 +{
 +  char *ptr[MAXPTR];
 +  int  i;
 +
 +  *n = str_nelem(str,MAXPTR,ptr);
 +
 +  snew(*r,*n);
 +  for(i=0; i<*n; i++) {
 +    (*r)[i] = strtod(ptr[i],NULL);
 +  }
 +}
 +
/* Process the free-energy-perturbation (FEP) lambda inputs.
 *   ir         = the input record; fepvals/expandedvals are filled in
 *   fep_lambda = one lambda-value string per FEP component (efptNR of them)
 *   weights    = string with initial expanded-ensemble lambda weights
 * All nonzero components must supply the same number of lambda values;
 * components left empty are copied from the efptFEP column (or stay zero).
 * Calls gmx_fatal() on inconsistent input. */
static void do_fep_params(t_inputrec *ir, char fep_lambda[][STRLEN],char weights[STRLEN]) {

    int i,j,max_n_lambda,nweights,nfep[efptNR];
    t_lambda *fep = ir->fepvals;
    t_expanded *expand = ir->expandedvals;
    real **count_fep_lambdas;
    gmx_bool bOneLambda = TRUE;

    /* temporary per-component arrays of parsed lambda values */
    snew(count_fep_lambdas,efptNR);

    /* FEP input processing */
    /* first, identify the number of lambda values for each type.
       All that are nonzero must have the same number */

    for (i=0;i<efptNR;i++)
    {
        parse_n_real(fep_lambda[i],&(nfep[i]),&(count_fep_lambdas[i]));
    }

    /* now, determine the number of components.  All must be either zero, or equal. */

    /* The break means max_n_lambda becomes the FIRST nonzero count; the
       loop below then enforces that every other nonzero count equals it. */
    max_n_lambda = 0;
    for (i=0;i<efptNR;i++)
    {
        if (nfep[i] > max_n_lambda) {
            max_n_lambda = nfep[i];  /* here's a nonzero one.  All of them
                                        must have the same number if its not zero.*/
            break;
        }
    }

    for (i=0;i<efptNR;i++)
    {
        if (nfep[i] == 0)
        {
            ir->fepvals->separate_dvdl[i] = FALSE;
        }
        else if (nfep[i] == max_n_lambda)
        {
            if (i!=efptTEMPERATURE)  /* we treat this differently -- not really a reason to compute the derivative with
                                        respect to the temperature currently */
            {
                ir->fepvals->separate_dvdl[i] = TRUE;
            }
        }
        else
        {
            gmx_fatal(FARGS,"Number of lambdas (%d) for FEP type %s not equal to number of other types (%d)",
                      nfep[i],efpt_names[i],max_n_lambda);
        }
    }
    /* we don't print out dhdl if the temperature is changing, since we can't correctly define dhdl in this case */
    ir->fepvals->separate_dvdl[efptTEMPERATURE] = FALSE;

    /* the number of lambdas is the number we've read in, which is either zero
       or the same for all */
    fep->n_lambda = max_n_lambda;

    /* allocate space for the array of lambda values */
    snew(fep->all_lambda,efptNR);
    /* if init_lambda is defined, we need to set lambda */
    if ((fep->init_lambda > 0) && (fep->n_lambda == 0))
    {
        ir->fepvals->separate_dvdl[efptFEP] = TRUE;
    }
    /* otherwise allocate the space for all of the lambdas, and transfer the data */
    for (i=0;i<efptNR;i++)
    {
        snew(fep->all_lambda[i],fep->n_lambda);
        /* NOTE(review): entries with nfep[i] == 0 are never sfree'd here;
           presumably snew with count 0 yields nothing to free — confirm. */
        if (nfep[i] > 0)  /* if it's zero, then the count_fep_lambda arrays
                             are zero */
        {
            for (j=0;j<fep->n_lambda;j++)
            {
                fep->all_lambda[i][j] = (double)count_fep_lambdas[i][j];
            }
            sfree(count_fep_lambdas[i]);
        }
    }
    sfree(count_fep_lambdas);

    /* "fep-vals" is either zero or the full number. If zero, we'll need to define fep-lambdas for internal
       bookkeeping -- for now, init_lambda */

    if ((nfep[efptFEP] == 0) && (fep->init_lambda >= 0) && (fep->init_lambda <= 1))
    {
        for (i=0;i<fep->n_lambda;i++)
        {
            fep->all_lambda[efptFEP][i] = fep->init_lambda;
        }
    }

    /* check to see if only a single component lambda is defined, and soft core is defined.
       In this case, turn on coulomb soft core */

    if (max_n_lambda == 0)
    {
        /* redundant with the initializer above, kept for clarity */
        bOneLambda = TRUE;
    }
    else
    {
        for (i=0;i<efptNR;i++)
        {
            if ((nfep[i] != 0) && (i!=efptFEP))
            {
                bOneLambda = FALSE;
            }
        }
    }
    if ((bOneLambda) && (fep->sc_alpha > 0))
    {
        fep->bScCoul = TRUE;
    }

    /* Fill in the others with the efptFEP if they are not explicitly
       specified (i.e. nfep[i] == 0).  This means if fep is not defined,
       they are all zero. */

    for (i=0;i<efptNR;i++)
    {
        if ((nfep[i] == 0) && (i!=efptFEP))
        {
            for (j=0;j<fep->n_lambda;j++)
            {
                fep->all_lambda[i][j] = fep->all_lambda[efptFEP][j];
            }
        }
    }


    /* make it easier if sc_r_power = 48 by increasing it to the 4th power, to be in the right scale. */
    if (fep->sc_r_power == 48)
    {
        if (fep->sc_alpha > 0.1)
        {
            gmx_fatal(FARGS,"sc_alpha (%f) for sc_r_power = 48 should usually be between 0.001 and 0.004", fep->sc_alpha);
        }
    }

    expand = ir->expandedvals;
    /* now read in the weights */
    parse_n_real(weights,&nweights,&(expand->init_lambda_weights));
    if (nweights == 0)
    {
        expand->bInit_weights = FALSE;
        snew(expand->init_lambda_weights,fep->n_lambda); /* initialize to zero */
    }
    else if (nweights != fep->n_lambda)
    {
        gmx_fatal(FARGS,"Number of weights (%d) is not equal to number of lambda values (%d)",
                  nweights,fep->n_lambda);
    }
    else
    {
        expand->bInit_weights = TRUE;
    }
    if ((expand->nstexpanded < 0) && (ir->efep != efepNO)) {
        expand->nstexpanded = fep->nstdhdl;
        /* if you don't specify nstexpanded when doing expanded ensemble free energy calcs, it is set to nstdhdl */
    }
    if ((expand->nstexpanded < 0) && ir->bSimTemp) {
        expand->nstexpanded = 2*(int)(ir->opts.tau_t[0]/ir->delta_t);
        /* if you don't specify nstexpanded when doing expanded ensemble simulated tempering, it is set to
           2*tau_t just to be careful so it's not to frequent  */
    }
}
 +
 +
 +static void do_simtemp_params(t_inputrec *ir) {
 +
 +    snew(ir->simtempvals->temperatures,ir->fepvals->n_lambda);
 +    GetSimTemps(ir->fepvals->n_lambda,ir->simtempvals,ir->fepvals->all_lambda[efptTEMPERATURE]);
 +
 +    return;
 +}
 +
 +static void do_wall_params(t_inputrec *ir,
 +                           char *wall_atomtype, char *wall_density,
 +                           t_gromppopts *opts)
 +{
 +    int  nstr,i;
 +    char *names[MAXPTR];
 +    double dbl;
 +
 +    opts->wall_atomtype[0] = NULL;
 +    opts->wall_atomtype[1] = NULL;
 +
 +    ir->wall_atomtype[0] = -1;
 +    ir->wall_atomtype[1] = -1;
 +    ir->wall_density[0] = 0;
 +    ir->wall_density[1] = 0;
 +  
 +    if (ir->nwall > 0)
 +    {
 +        nstr = str_nelem(wall_atomtype,MAXPTR,names);
 +        if (nstr != ir->nwall)
 +        {
 +            gmx_fatal(FARGS,"Expected %d elements for wall_atomtype, found %d",
 +                      ir->nwall,nstr);
 +        }
 +        for(i=0; i<ir->nwall; i++)
 +        {
 +            opts->wall_atomtype[i] = strdup(names[i]);
 +        }
 +    
 +        if (ir->wall_type == ewt93 || ir->wall_type == ewt104) {
 +            nstr = str_nelem(wall_density,MAXPTR,names);
 +            if (nstr != ir->nwall)
 +            {
 +                gmx_fatal(FARGS,"Expected %d elements for wall-density, found %d",ir->nwall,nstr);
 +            }
 +            for(i=0; i<ir->nwall; i++)
 +            {
 +                sscanf(names[i],"%lf",&dbl);
 +                if (dbl <= 0)
 +                {
 +                    gmx_fatal(FARGS,"wall-density[%d] = %f\n",i,dbl);
 +                }
 +                ir->wall_density[i] = dbl;
 +            }
 +        }
 +    }
 +}
 +
 +static void add_wall_energrps(gmx_groups_t *groups,int nwall,t_symtab *symtab)
 +{
 +  int  i;
 +  t_grps *grps;
 +  char str[STRLEN];
 +  
 +  if (nwall > 0) {
 +    srenew(groups->grpname,groups->ngrpname+nwall);
 +    grps = &(groups->grps[egcENER]);
 +    srenew(grps->nm_ind,grps->nr+nwall);
 +    for(i=0; i<nwall; i++) {
 +      sprintf(str,"wall%d",i);
 +      groups->grpname[groups->ngrpname] = put_symtab(symtab,str);
 +      grps->nm_ind[grps->nr++] = groups->ngrpname++;
 +    }
 +  }
 +}
 +
/* Read the expanded-ensemble parameter section from the parsed .mdp input.
 *   ninp_p/inp_p = in/out: parsed input-file records; note the *TYPE
 *                  macros below implicitly read and update the local
 *                  variables ninp and inp (and use wi for warnings), so
 *                  those names must not be changed
 *   expand       = the expanded-ensemble settings to fill in
 *   wi           = warning handler */
void read_expandedparams(int *ninp_p,t_inpfile **inp_p,
                         t_expanded *expand,warninp_t wi)
{
  int  ninp,nerror=0;  /* nerror is currently unused */
  t_inpfile *inp;

  ninp   = *ninp_p;
  inp    = *inp_p;

  /* read expanded ensemble parameters */
  CCTYPE ("expanded ensemble variables");
  ITYPE ("nstexpanded",expand->nstexpanded,-1);
  EETYPE("lmc-stats", expand->elamstats, elamstats_names);
  EETYPE("lmc-move", expand->elmcmove, elmcmove_names);
  EETYPE("lmc-weights-equil",expand->elmceq,elmceq_names);
  ITYPE ("weight-equil-number-all-lambda",expand->equil_n_at_lam,-1);
  ITYPE ("weight-equil-number-samples",expand->equil_samples,-1);
  ITYPE ("weight-equil-number-steps",expand->equil_steps,-1);
  RTYPE ("weight-equil-wl-delta",expand->equil_wl_delta,-1);
  RTYPE ("weight-equil-count-ratio",expand->equil_ratio,-1);
  CCTYPE("Seed for Monte Carlo in lambda space");
  ITYPE ("lmc-seed",expand->lmc_seed,-1);
  RTYPE ("mc-temperature",expand->mc_temp,-1);
  ITYPE ("lmc-repeats",expand->lmc_repeats,1);
  ITYPE ("lmc-gibbsdelta",expand->gibbsdeltalam,-1);
  ITYPE ("lmc-forced-nstart",expand->lmc_forced_nstart,0);
  EETYPE("symmetrized-transition-matrix", expand->bSymmetrizedTMatrix, yesno_names);
  ITYPE("nst-transition-matrix", expand->nstTij, -1);
  /* NOTE(review): "mininum-var-min" looks misspelled, but it is the
     user-visible .mdp option key, so renaming it would break existing
     input files — confirm before changing. */
  ITYPE ("mininum-var-min",expand->minvarmin, 100); /*default is reasonable */
  ITYPE ("weight-c-range",expand->c_range, 0); /* default is just C=0 */
  RTYPE ("wl-scale",expand->wl_scale,0.8);
  RTYPE ("wl-ratio",expand->wl_ratio,0.8);
  RTYPE ("init-wl-delta",expand->init_wl_delta,1.0);
  EETYPE("wl-oneovert",expand->bWLoneovert,yesno_names);

  /* hand the (possibly advanced) parse state back to the caller */
  *ninp_p   = ninp;
  *inp_p    = inp;

  return;
}
 +
 +void get_ir(const char *mdparin,const char *mdparout,
 +            t_inputrec *ir,t_gromppopts *opts,
 +            warninp_t wi)
 +{
 +  char      *dumstr[2];
 +  double    dumdub[2][6];
 +  t_inpfile *inp;
 +  const char *tmp;
 +  int       i,j,m,ninp;
 +  char      warn_buf[STRLEN];
 +  t_lambda  *fep = ir->fepvals;
 +  t_expanded *expand = ir->expandedvals;
 +
 +  inp = read_inpfile(mdparin, &ninp, NULL, wi);
 +
 +  snew(dumstr[0],STRLEN);
 +  snew(dumstr[1],STRLEN);
 +
 +  /* remove the following deprecated commands */
 +  REM_TYPE("title");
 +  REM_TYPE("cpp");
 +  REM_TYPE("domain-decomposition");
 +  REM_TYPE("andersen-seed");
 +  REM_TYPE("dihre");
 +  REM_TYPE("dihre-fc");
 +  REM_TYPE("dihre-tau");
 +  REM_TYPE("nstdihreout");
 +  REM_TYPE("nstcheckpoint");
 +
 +  /* replace the following commands with the clearer new versions*/
 +  REPL_TYPE("unconstrained-start","continuation");
 +  REPL_TYPE("foreign-lambda","fep-lambdas");
 +
 +  CCTYPE ("VARIOUS PREPROCESSING OPTIONS");
 +  CTYPE ("Preprocessor information: use cpp syntax.");
 +  CTYPE ("e.g.: -I/home/joe/doe -I/home/mary/roe");
 +  STYPE ("include",   opts->include,  NULL);
 +  CTYPE ("e.g.: -DPOSRES -DFLEXIBLE (note these variable names are case sensitive)");
 +  STYPE ("define",    opts->define,   NULL);
 +    
 +  CCTYPE ("RUN CONTROL PARAMETERS");
 +  EETYPE("integrator",  ir->eI,         ei_names);
 +  CTYPE ("Start time and timestep in ps");
 +  RTYPE ("tinit",     ir->init_t,     0.0);
 +  RTYPE ("dt",                ir->delta_t,    0.001);
 +  STEPTYPE ("nsteps",   ir->nsteps,     0);
 +  CTYPE ("For exact run continuation or redoing part of a run");
 +  STEPTYPE ("init-step",ir->init_step,  0);
 +  CTYPE ("Part index is updated automatically on checkpointing (keeps files separate)");
 +  ITYPE ("simulation-part", ir->simulation_part, 1);
 +  CTYPE ("mode for center of mass motion removal");
 +  EETYPE("comm-mode",   ir->comm_mode,  ecm_names);
 +  CTYPE ("number of steps for center of mass motion removal");
 +  ITYPE ("nstcomm",   ir->nstcomm,    100);
 +  CTYPE ("group(s) for center of mass motion removal");
 +  STYPE ("comm-grps",   vcm,            NULL);
 +  
 +  CCTYPE ("LANGEVIN DYNAMICS OPTIONS");
 +  CTYPE ("Friction coefficient (amu/ps) and random seed");
 +  RTYPE ("bd-fric",     ir->bd_fric,    0.0);
 +  ITYPE ("ld-seed",     ir->ld_seed,    1993);
 +  
 +  /* Em stuff */
 +  CCTYPE ("ENERGY MINIMIZATION OPTIONS");
 +  CTYPE ("Force tolerance and initial step-size");
 +  RTYPE ("emtol",       ir->em_tol,     10.0);
 +  RTYPE ("emstep",      ir->em_stepsize,0.01);
 +  CTYPE ("Max number of iterations in relax-shells");
 +  ITYPE ("niter",       ir->niter,      20);
 +  CTYPE ("Step size (ps^2) for minimization of flexible constraints");
 +  RTYPE ("fcstep",      ir->fc_stepsize, 0);
 +  CTYPE ("Frequency of steepest descents steps when doing CG");
 +  ITYPE ("nstcgsteep",        ir->nstcgsteep, 1000);
 +  ITYPE ("nbfgscorr",   ir->nbfgscorr,  10); 
 +
 +  CCTYPE ("TEST PARTICLE INSERTION OPTIONS");
 +  RTYPE ("rtpi",      ir->rtpi,       0.05);
 +
 +  /* Output options */
 +  CCTYPE ("OUTPUT CONTROL OPTIONS");
 +  CTYPE ("Output frequency for coords (x), velocities (v) and forces (f)");
 +  ITYPE ("nstxout",   ir->nstxout,    0);
 +  ITYPE ("nstvout",   ir->nstvout,    0);
 +  ITYPE ("nstfout",   ir->nstfout,    0);
 +  ir->nstcheckpoint = 1000;
 +  CTYPE ("Output frequency for energies to log file and energy file");
 +  ITYPE ("nstlog",    ir->nstlog,     1000);
 +  ITYPE ("nstcalcenergy",ir->nstcalcenergy,   100);
 +  ITYPE ("nstenergy",   ir->nstenergy,  1000);
 +  CTYPE ("Output frequency and precision for .xtc file");
 +  ITYPE ("nstxtcout",   ir->nstxtcout,  0);
 +  RTYPE ("xtc-precision",ir->xtcprec,   1000.0);
 +  CTYPE ("This selects the subset of atoms for the .xtc file. You can");
 +  CTYPE ("select multiple groups. By default all atoms will be written.");
 +  STYPE ("xtc-grps",    xtc_grps,       NULL);
 +  CTYPE ("Selection of energy groups");
 +  STYPE ("energygrps",  energy,         NULL);
 +
 +  /* Neighbor searching */  
 +  CCTYPE ("NEIGHBORSEARCHING PARAMETERS");
 +  CTYPE ("cut-off scheme (group: using charge groups, Verlet: particle based cut-offs)");
 +  EETYPE("cutoff-scheme",     ir->cutoff_scheme,    ecutscheme_names);
 +  CTYPE ("nblist update frequency");
 +  ITYPE ("nstlist",   ir->nstlist,    10);
 +  CTYPE ("ns algorithm (simple or grid)");
 +  EETYPE("ns-type",     ir->ns_type,    ens_names);
 +  /* set ndelta to the optimal value of 2 */
 +  ir->ndelta = 2;
 +  CTYPE ("Periodic boundary conditions: xyz, no, xy");
 +  EETYPE("pbc",         ir->ePBC,       epbc_names);
 +  EETYPE("periodic-molecules", ir->bPeriodicMols, yesno_names);
 +  CTYPE ("Allowed energy drift due to the Verlet buffer in kJ/mol/ps per atom,");
 +  CTYPE ("a value of -1 means: use rlist");
 +  RTYPE("verlet-buffer-drift", ir->verletbuf_drift,    0.005);
 +  CTYPE ("nblist cut-off");
-   RTYPE ("rcoulomb",  ir->rcoulomb,   -1);
++  RTYPE ("rlist",     ir->rlist,      1.0);
 +  CTYPE ("long-range cut-off for switched potentials");
 +  RTYPE ("rlistlong", ir->rlistlong,  -1);
 +  ITYPE ("nstcalclr", ir->nstcalclr,  -1);
 +
 +  /* Electrostatics */
 +  CCTYPE ("OPTIONS FOR ELECTROSTATICS AND VDW");
 +  CTYPE ("Method for doing electrostatics");
 +  EETYPE("coulombtype",       ir->coulombtype,    eel_names);
 +  EETYPE("coulomb-modifier",  ir->coulomb_modifier,    eintmod_names);
 +  CTYPE ("cut-off lengths");
 +  RTYPE ("rcoulomb-switch",   ir->rcoulomb_switch,    0.0);
-   RTYPE ("rvdw",      ir->rvdw,       -1);
++  RTYPE ("rcoulomb",  ir->rcoulomb,   1.0);
 +  CTYPE ("Relative dielectric constant for the medium and the reaction field");
 +  RTYPE ("epsilon-r",   ir->epsilon_r,  1.0);
 +  RTYPE ("epsilon-rf",  ir->epsilon_rf, 0.0);
 +  CTYPE ("Method for doing Van der Waals");
 +  EETYPE("vdw-type",  ir->vdwtype,    evdw_names);
 +  EETYPE("vdw-modifier",      ir->vdw_modifier,    eintmod_names);
 +  CTYPE ("cut-off lengths");
 +  RTYPE ("rvdw-switch",       ir->rvdw_switch,        0.0);
-   CTYPE ("Seperate tables between energy group pairs");
++  RTYPE ("rvdw",      ir->rvdw,       1.0);
 +  CTYPE ("Apply long range dispersion corrections for Energy and Pressure");
 +  EETYPE("DispCorr",    ir->eDispCorr,  edispc_names);
 +  CTYPE ("Extension of the potential lookup tables beyond the cut-off");
 +  RTYPE ("table-extension", ir->tabext, 1.0);
++  CTYPE ("Separate tables between energy group pairs");
 +  STYPE ("energygrp-table", egptable,   NULL);
 +  CTYPE ("Spacing for the PME/PPPM FFT grid");
 +  RTYPE ("fourierspacing", ir->fourier_spacing,0.12);
 +  CTYPE ("FFT grid size, when a value is 0 fourierspacing will be used");
 +  ITYPE ("fourier-nx",  ir->nkx,         0);
 +  ITYPE ("fourier-ny",  ir->nky,         0);
 +  ITYPE ("fourier-nz",  ir->nkz,         0);
 +  CTYPE ("EWALD/PME/PPPM parameters");
 +  ITYPE ("pme-order",   ir->pme_order,   4);
 +  RTYPE ("ewald-rtol",  ir->ewald_rtol, 0.00001);
 +  EETYPE("ewald-geometry", ir->ewald_geometry, eewg_names);
 +  RTYPE ("epsilon-surface", ir->epsilon_surface, 0.0);
 +  EETYPE("optimize-fft",ir->bOptFFT,  yesno_names);
 +
 +  CCTYPE("IMPLICIT SOLVENT ALGORITHM");
 +  EETYPE("implicit-solvent", ir->implicit_solvent, eis_names);
 +      
 +  CCTYPE ("GENERALIZED BORN ELECTROSTATICS"); 
 +  CTYPE ("Algorithm for calculating Born radii");
 +  EETYPE("gb-algorithm", ir->gb_algorithm, egb_names);
 +  CTYPE ("Frequency of calculating the Born radii inside rlist");
 +  ITYPE ("nstgbradii", ir->nstgbradii, 1);
 +  CTYPE ("Cutoff for Born radii calculation; the contribution from atoms");
 +  CTYPE ("between rlist and rgbradii is updated every nstlist steps");
 +  RTYPE ("rgbradii",  ir->rgbradii, 1.0);
 +  CTYPE ("Dielectric coefficient of the implicit solvent");
 +  RTYPE ("gb-epsilon-solvent",ir->gb_epsilon_solvent, 80.0);
 +  CTYPE ("Salt concentration in M for Generalized Born models");
 +  RTYPE ("gb-saltconc",  ir->gb_saltconc, 0.0);
 +  CTYPE ("Scaling factors used in the OBC GB model. Default values are OBC(II)");
 +  RTYPE ("gb-obc-alpha", ir->gb_obc_alpha, 1.0);
 +  RTYPE ("gb-obc-beta", ir->gb_obc_beta, 0.8);
 +  RTYPE ("gb-obc-gamma", ir->gb_obc_gamma, 4.85);
 +  RTYPE ("gb-dielectric-offset", ir->gb_dielectric_offset, 0.009);
 +  EETYPE("sa-algorithm", ir->sa_algorithm, esa_names);
 +  CTYPE ("Surface tension (kJ/mol/nm^2) for the SA (nonpolar surface) part of GBSA");
 +  CTYPE ("The value -1 will set default value for Still/HCT/OBC GB-models.");
 +  RTYPE ("sa-surface-tension", ir->sa_surface_tension, -1);
 +               
 +  /* Coupling stuff */
 +  CCTYPE ("OPTIONS FOR WEAK COUPLING ALGORITHMS");
 +  CTYPE ("Temperature coupling");
 +  EETYPE("tcoupl",    ir->etc,        etcoupl_names);
 +  ITYPE ("nsttcouple", ir->nsttcouple,  -1);
 +  ITYPE("nh-chain-length",     ir->opts.nhchainlength, NHCHAINLENGTH);
 +  EETYPE("print-nose-hoover-chain-variables", ir->bPrintNHChains, yesno_names);
 +  CTYPE ("Groups to couple separately");
 +  STYPE ("tc-grps",     tcgrps,         NULL);
 +  CTYPE ("Time constant (ps) and reference temperature (K)");
 +  STYPE ("tau-t",     tau_t,          NULL);
 +  STYPE ("ref-t",     ref_t,          NULL);
 +  CTYPE ("pressure coupling");
 +  EETYPE("pcoupl",    ir->epc,        epcoupl_names);
 +  EETYPE("pcoupltype",        ir->epct,       epcoupltype_names);
 +  ITYPE ("nstpcouple", ir->nstpcouple,  -1);
 +  CTYPE ("Time constant (ps), compressibility (1/bar) and reference P (bar)");
 +  RTYPE ("tau-p",     ir->tau_p,      1.0);
 +  STYPE ("compressibility",   dumstr[0],      NULL);
 +  STYPE ("ref-p",       dumstr[1],      NULL);
 +  CTYPE ("Scaling of reference coordinates, No, All or COM");
 +  EETYPE ("refcoord-scaling",ir->refcoord_scaling,erefscaling_names);
 +
 +  /* QMMM */
 +  CCTYPE ("OPTIONS FOR QMMM calculations");
 +  EETYPE("QMMM", ir->bQMMM, yesno_names);
 +  CTYPE ("Groups treated Quantum Mechanically");
 +  STYPE ("QMMM-grps",  QMMM,          NULL);
 +  CTYPE ("QM method");
 +  STYPE("QMmethod",     QMmethod, NULL);
 +  CTYPE ("QMMM scheme");
 +  EETYPE("QMMMscheme",  ir->QMMMscheme,    eQMMMscheme_names);
 +  CTYPE ("QM basisset");
 +  STYPE("QMbasis",      QMbasis, NULL);
 +  CTYPE ("QM charge");
 +  STYPE ("QMcharge",    QMcharge,NULL);
 +  CTYPE ("QM multiplicity");
 +  STYPE ("QMmult",      QMmult,NULL);
 +  CTYPE ("Surface Hopping");
 +  STYPE ("SH",          bSH, NULL);
 +  CTYPE ("CAS space options");
 +  STYPE ("CASorbitals",      CASorbitals,   NULL);
 +  STYPE ("CASelectrons",     CASelectrons,  NULL);
 +  STYPE ("SAon", SAon, NULL);
 +  STYPE ("SAoff",SAoff,NULL);
 +  STYPE ("SAsteps",  SAsteps, NULL);
 +  CTYPE ("Scale factor for MM charges");
 +  RTYPE ("MMChargeScaleFactor", ir->scalefactor, 1.0);
 +  CTYPE ("Optimization of QM subsystem");
 +  STYPE ("bOPT",          bOPT, NULL);
 +  STYPE ("bTS",          bTS, NULL);
 +
 +  /* Simulated annealing */
 +  CCTYPE("SIMULATED ANNEALING");
 +  CTYPE ("Type of annealing for each temperature group (no/single/periodic)");
 +  STYPE ("annealing",   anneal,      NULL);
 +  CTYPE ("Number of time points to use for specifying annealing in each group");
 +  STYPE ("annealing-npoints", anneal_npoints, NULL);
 +  CTYPE ("List of times at the annealing points for each group");
 +  STYPE ("annealing-time",       anneal_time,       NULL);
 +  CTYPE ("Temp. at each annealing point, for each group.");
 +  STYPE ("annealing-temp",  anneal_temp,  NULL);
 +  
 +  /* Startup run */
 +  CCTYPE ("GENERATE VELOCITIES FOR STARTUP RUN");
 +  EETYPE("gen-vel",     opts->bGenVel,  yesno_names);
 +  RTYPE ("gen-temp",    opts->tempi,    300.0);
 +  ITYPE ("gen-seed",    opts->seed,     173529);
 +  
 +  /* Shake stuff */
 +  CCTYPE ("OPTIONS FOR BONDS");
 +  EETYPE("constraints",       opts->nshake,   constraints);
 +  CTYPE ("Type of constraint algorithm");
 +  EETYPE("constraint-algorithm",  ir->eConstrAlg, econstr_names);
 +  CTYPE ("Do not constrain the start configuration");
 +  EETYPE("continuation", ir->bContinuation, yesno_names);
 +  CTYPE ("Use successive overrelaxation to reduce the number of shake iterations");
 +  EETYPE("Shake-SOR", ir->bShakeSOR, yesno_names);
 +  CTYPE ("Relative tolerance of shake");
 +  RTYPE ("shake-tol", ir->shake_tol, 0.0001);
 +  CTYPE ("Highest order in the expansion of the constraint coupling matrix");
 +  ITYPE ("lincs-order", ir->nProjOrder, 4);
 +  CTYPE ("Number of iterations in the final step of LINCS. 1 is fine for");
 +  CTYPE ("normal simulations, but use 2 to conserve energy in NVE runs.");
 +  CTYPE ("For energy minimization with constraints it should be 4 to 8.");
 +  ITYPE ("lincs-iter", ir->nLincsIter, 1);
 +  CTYPE ("Lincs will write a warning to the stderr if in one step a bond"); 
 +  CTYPE ("rotates over more degrees than");
 +  RTYPE ("lincs-warnangle", ir->LincsWarnAngle, 30.0);
 +  CTYPE ("Convert harmonic bonds to morse potentials");
 +  EETYPE("morse",       opts->bMorse,yesno_names);
 +
 +  /* Energy group exclusions */
 +  CCTYPE ("ENERGY GROUP EXCLUSIONS");
 +  CTYPE ("Pairs of energy groups for which all non-bonded interactions are excluded");
 +  STYPE ("energygrp-excl", egpexcl,     NULL);
 +  
 +  /* Walls */
 +  CCTYPE ("WALLS");
 +  CTYPE ("Number of walls, type, atom types, densities and box-z scale factor for Ewald");
 +  ITYPE ("nwall", ir->nwall, 0);
 +  EETYPE("wall-type",     ir->wall_type,   ewt_names);
 +  RTYPE ("wall-r-linpot", ir->wall_r_linpot, -1);
 +  STYPE ("wall-atomtype", wall_atomtype, NULL);
 +  STYPE ("wall-density",  wall_density,  NULL);
 +  RTYPE ("wall-ewald-zfac", ir->wall_ewald_zfac, 3);
 +  
 +  /* COM pulling */
 +  CCTYPE("COM PULLING");
 +  CTYPE("Pull type: no, umbrella, constraint or constant-force");
 +  EETYPE("pull",          ir->ePull, epull_names);
 +  if (ir->ePull != epullNO) {
 +    snew(ir->pull,1);
 +    pull_grp = read_pullparams(&ninp,&inp,ir->pull,&opts->pull_start,wi);
 +  }
 +  
 +  /* Enforced rotation */
 +  CCTYPE("ENFORCED ROTATION");
 +  CTYPE("Enforced rotation: No or Yes");
 +  EETYPE("rotation",       ir->bRot, yesno_names);
 +  if (ir->bRot) {
 +    snew(ir->rot,1);
 +    rot_grp = read_rotparams(&ninp,&inp,ir->rot,wi);
 +  }
 +
 +  /* Refinement */
 +  CCTYPE("NMR refinement stuff");
 +  CTYPE ("Distance restraints type: No, Simple or Ensemble");
 +  EETYPE("disre",       ir->eDisre,     edisre_names);
 +  CTYPE ("Force weighting of pairs in one distance restraint: Conservative or Equal");
 +  EETYPE("disre-weighting", ir->eDisreWeighting, edisreweighting_names);
 +  CTYPE ("Use sqrt of the time averaged times the instantaneous violation");
 +  EETYPE("disre-mixed", ir->bDisreMixed, yesno_names);
 +  RTYPE ("disre-fc",  ir->dr_fc,      1000.0);
 +  RTYPE ("disre-tau", ir->dr_tau,     0.0);
 +  CTYPE ("Output frequency for pair distances to energy file");
 +  ITYPE ("nstdisreout", ir->nstdisreout, 100);
 +  CTYPE ("Orientation restraints: No or Yes");
 +  EETYPE("orire",       opts->bOrire,   yesno_names);
 +  CTYPE ("Orientation restraints force constant and tau for time averaging");
 +  RTYPE ("orire-fc",  ir->orires_fc,  0.0);
 +  RTYPE ("orire-tau", ir->orires_tau, 0.0);
 +  STYPE ("orire-fitgrp",orirefitgrp,    NULL);
 +  CTYPE ("Output frequency for trace(SD) and S to energy file");
 +  ITYPE ("nstorireout", ir->nstorireout, 100);
 +
 +  /* free energy variables */
 +  CCTYPE ("Free energy variables");
 +  EETYPE("free-energy", ir->efep, efep_names);
 +  STYPE ("couple-moltype",  couple_moltype,  NULL);
 +  EETYPE("couple-lambda0", opts->couple_lam0, couple_lam);
 +  EETYPE("couple-lambda1", opts->couple_lam1, couple_lam);
 +  EETYPE("couple-intramol", opts->bCoupleIntra, yesno_names);
 +
 +  RTYPE ("init-lambda", fep->init_lambda,-1); /* start with -1 so
 +                                                 we can recognize if
 +                                                 it was not entered */
 +  ITYPE ("init-lambda-state", fep->init_fep_state,0);
 +  RTYPE ("delta-lambda",fep->delta_lambda,0.0);
 +  ITYPE ("nstdhdl",fep->nstdhdl, 100);
 +  STYPE ("fep-lambdas", fep_lambda[efptFEP], NULL);
 +  STYPE ("mass-lambdas", fep_lambda[efptMASS], NULL);
 +  STYPE ("coul-lambdas", fep_lambda[efptCOUL], NULL);
 +  STYPE ("vdw-lambdas", fep_lambda[efptVDW], NULL);
 +  STYPE ("bonded-lambdas", fep_lambda[efptBONDED], NULL);
 +  STYPE ("restraint-lambdas", fep_lambda[efptRESTRAINT], NULL);
 +  STYPE ("temperature-lambdas", fep_lambda[efptTEMPERATURE], NULL);
 +  STYPE ("init-lambda-weights",lambda_weights,NULL);
 +  EETYPE("dhdl-print-energy", fep->bPrintEnergy, yesno_names);
 +  RTYPE ("sc-alpha",fep->sc_alpha,0.0);
 +  ITYPE ("sc-power",fep->sc_power,1);
 +  RTYPE ("sc-r-power",fep->sc_r_power,6.0);
 +  RTYPE ("sc-sigma",fep->sc_sigma,0.3);
 +  EETYPE("sc-coul",fep->bScCoul,yesno_names);
 +  ITYPE ("dh_hist_size", fep->dh_hist_size, 0);
 +  RTYPE ("dh_hist_spacing", fep->dh_hist_spacing, 0.1);
 +  EETYPE("separate-dhdl-file", fep->separate_dhdl_file,
 +                               separate_dhdl_file_names);
 +  EETYPE("dhdl-derivatives", fep->dhdl_derivatives, dhdl_derivatives_names);
 +  ITYPE ("dh_hist_size", fep->dh_hist_size, 0);
 +  RTYPE ("dh_hist_spacing", fep->dh_hist_spacing, 0.1);
 +
 +  /* Non-equilibrium MD stuff */  
 +  CCTYPE("Non-equilibrium MD stuff");
 +  STYPE ("acc-grps",    accgrps,        NULL);
 +  STYPE ("accelerate",  acc,            NULL);
 +  STYPE ("freezegrps",  freeze,         NULL);
 +  STYPE ("freezedim",   frdim,          NULL);
 +  RTYPE ("cos-acceleration", ir->cos_accel, 0);
 +  STYPE ("deform",      deform,         NULL);
 +
 +  /* simulated tempering variables */
 +  CCTYPE("simulated tempering variables");
 +  EETYPE("simulated-tempering",ir->bSimTemp,yesno_names);
 +  EETYPE("simulated-tempering-scaling",ir->simtempvals->eSimTempScale,esimtemp_names);
 +  RTYPE("sim-temp-low",ir->simtempvals->simtemp_low,300.0);
 +  RTYPE("sim-temp-high",ir->simtempvals->simtemp_high,300.0);
 +
 +  /* expanded ensemble variables */
 +  if (ir->efep==efepEXPANDED || ir->bSimTemp)
 +  {
 +      read_expandedparams(&ninp,&inp,expand,wi);
 +  }
 +
 +  /* Electric fields */
 +  CCTYPE("Electric fields");
 +  CTYPE ("Format is number of terms (int) and for all terms an amplitude (real)");
 +  CTYPE ("and a phase angle (real)");
 +  STYPE ("E-x",       efield_x,       NULL);
 +  STYPE ("E-xt",      efield_xt,      NULL);
 +  STYPE ("E-y",       efield_y,       NULL);
 +  STYPE ("E-yt",      efield_yt,      NULL);
 +  STYPE ("E-z",       efield_z,       NULL);
 +  STYPE ("E-zt",      efield_zt,      NULL);
 +  
 +  /* AdResS defined thingies */
 +  CCTYPE ("AdResS parameters");
 +  EETYPE("adress",       ir->bAdress, yesno_names);
 +  if (ir->bAdress) {
 +    snew(ir->adress,1);
 +    read_adressparams(&ninp,&inp,ir->adress,wi);
 +  }
 +
 +  /* User defined thingies */
 +  CCTYPE ("User defined thingies");
 +  STYPE ("user1-grps",  user1,          NULL);
 +  STYPE ("user2-grps",  user2,          NULL);
 +  ITYPE ("userint1",    ir->userint1,   0);
 +  ITYPE ("userint2",    ir->userint2,   0);
 +  ITYPE ("userint3",    ir->userint3,   0);
 +  ITYPE ("userint4",    ir->userint4,   0);
 +  RTYPE ("userreal1",   ir->userreal1,  0);
 +  RTYPE ("userreal2",   ir->userreal2,  0);
 +  RTYPE ("userreal3",   ir->userreal3,  0);
 +  RTYPE ("userreal4",   ir->userreal4,  0);
 +#undef CTYPE
 +
 +  write_inpfile(mdparout,ninp,inp,FALSE,wi);
 +  for (i=0; (i<ninp); i++) {
 +    sfree(inp[i].name);
 +    sfree(inp[i].value);
 +  }
 +  sfree(inp);
 +
 +  /* Process options if necessary */
 +  for(m=0; m<2; m++) {
 +    for(i=0; i<2*DIM; i++)
 +      dumdub[m][i]=0.0;
 +    if(ir->epc) {
 +      switch (ir->epct) {
 +      case epctISOTROPIC:
 +      if (sscanf(dumstr[m],"%lf",&(dumdub[m][XX]))!=1) {
 +        warning_error(wi,"Pressure coupling not enough values (I need 1)");
 +      }
 +      dumdub[m][YY]=dumdub[m][ZZ]=dumdub[m][XX];
 +      break;
 +      case epctSEMIISOTROPIC:
 +      case epctSURFACETENSION:
 +      if (sscanf(dumstr[m],"%lf%lf",
 +                 &(dumdub[m][XX]),&(dumdub[m][ZZ]))!=2) {
 +        warning_error(wi,"Pressure coupling not enough values (I need 2)");
 +      }
 +      dumdub[m][YY]=dumdub[m][XX];
 +      break;
 +      case epctANISOTROPIC:
 +      if (sscanf(dumstr[m],"%lf%lf%lf%lf%lf%lf",
 +                 &(dumdub[m][XX]),&(dumdub[m][YY]),&(dumdub[m][ZZ]),
 +                 &(dumdub[m][3]),&(dumdub[m][4]),&(dumdub[m][5]))!=6) {
 +        warning_error(wi,"Pressure coupling not enough values (I need 6)");
 +      }
 +      break;
 +      default:
 +      gmx_fatal(FARGS,"Pressure coupling type %s not implemented yet",
 +                  epcoupltype_names[ir->epct]);
 +      }
 +    }
 +  }
 +  clear_mat(ir->ref_p);
 +  clear_mat(ir->compress);
 +  for(i=0; i<DIM; i++) {
 +    ir->ref_p[i][i]    = dumdub[1][i];
 +    ir->compress[i][i] = dumdub[0][i];
 +  }
 +  if (ir->epct == epctANISOTROPIC) {
 +    ir->ref_p[XX][YY] = dumdub[1][3];
 +    ir->ref_p[XX][ZZ] = dumdub[1][4];
 +    ir->ref_p[YY][ZZ] = dumdub[1][5];
 +    if (ir->ref_p[XX][YY]!=0 && ir->ref_p[XX][ZZ]!=0 && ir->ref_p[YY][ZZ]!=0) {
 +      warning(wi,"All off-diagonal reference pressures are non-zero. Are you sure you want to apply a threefold shear stress?\n");
 +    }
 +    ir->compress[XX][YY] = dumdub[0][3];
 +    ir->compress[XX][ZZ] = dumdub[0][4];
 +    ir->compress[YY][ZZ] = dumdub[0][5];
 +    for(i=0; i<DIM; i++) {
 +      for(m=0; m<i; m++) {
 +      ir->ref_p[i][m] = ir->ref_p[m][i];
 +      ir->compress[i][m] = ir->compress[m][i];
 +      }
 +    }
 +  } 
 +  
 +  if (ir->comm_mode == ecmNO)
 +    ir->nstcomm = 0;
 +
 +  opts->couple_moltype = NULL;
 +  if (strlen(couple_moltype) > 0) 
 +  {
 +      if (ir->efep != efepNO) 
 +      {
 +          opts->couple_moltype = strdup(couple_moltype);
 +          if (opts->couple_lam0 == opts->couple_lam1)
 +          {
 +              warning(wi,"The lambda=0 and lambda=1 states for coupling are identical");
 +          }
 +          if (ir->eI == eiMD && (opts->couple_lam0 == ecouplamNONE ||
 +                                 opts->couple_lam1 == ecouplamNONE)) 
 +          {
 +              warning(wi,"For proper sampling of the (nearly) decoupled state, stochastic dynamics should be used");
 +          }
 +      }
 +      else
 +      {
 +          warning(wi,"Can not couple a molecule with free_energy = no");
 +      }
 +  }
 +  /* FREE ENERGY AND EXPANDED ENSEMBLE OPTIONS */
 +  if (ir->efep != efepNO) {
 +      if (fep->delta_lambda > 0) {
 +          ir->efep = efepSLOWGROWTH;
 +      }
 +  }
 +
 +  if (ir->bSimTemp) {
 +      fep->bPrintEnergy = TRUE;
 +      /* always print out the energy to dhdl if we are doing expanded ensemble, since we need the total energy
 +         if the temperature is changing. */
 +  }
 +
 +  if ((ir->efep != efepNO) || ir->bSimTemp)
 +  {
 +      ir->bExpanded = FALSE;
 +      if ((ir->efep == efepEXPANDED) || ir->bSimTemp)
 +      {
 +          ir->bExpanded = TRUE;
 +      }
 +      do_fep_params(ir,fep_lambda,lambda_weights);
 +      if (ir->bSimTemp) { /* done after fep params */
 +          do_simtemp_params(ir);
 +      }
 +  }
 +  else
 +  {
 +      ir->fepvals->n_lambda = 0;
 +  }
 +
 +  /* WALL PARAMETERS */
 +
 +  do_wall_params(ir,wall_atomtype,wall_density,opts);
 +
 +  /* ORIENTATION RESTRAINT PARAMETERS */
 +  
 +  if (opts->bOrire && str_nelem(orirefitgrp,MAXPTR,NULL)!=1) {
 +      warning_error(wi,"ERROR: Need one orientation restraint fit group\n");
 +  }
 +
 +  /* DEFORMATION PARAMETERS */
 +
 +  clear_mat(ir->deform);
 +  for(i=0; i<6; i++)
 +  {
 +      dumdub[0][i] = 0;
 +  }
 +  m = sscanf(deform,"%lf %lf %lf %lf %lf %lf",
 +           &(dumdub[0][0]),&(dumdub[0][1]),&(dumdub[0][2]),
 +           &(dumdub[0][3]),&(dumdub[0][4]),&(dumdub[0][5]));
 +  for(i=0; i<3; i++)
 +  {
 +      ir->deform[i][i] = dumdub[0][i];
 +  }
 +  ir->deform[YY][XX] = dumdub[0][3];
 +  ir->deform[ZZ][XX] = dumdub[0][4];
 +  ir->deform[ZZ][YY] = dumdub[0][5];
 +  if (ir->epc != epcNO) {
 +    for(i=0; i<3; i++)
 +      for(j=0; j<=i; j++)
 +      if (ir->deform[i][j]!=0 && ir->compress[i][j]!=0) {
 +        warning_error(wi,"A box element has deform set and compressibility > 0");
 +      }
 +    for(i=0; i<3; i++)
 +      for(j=0; j<i; j++)
 +      if (ir->deform[i][j]!=0) {
 +        for(m=j; m<DIM; m++)
 +          if (ir->compress[m][j]!=0) {
 +            sprintf(warn_buf,"An off-diagonal box element has deform set while compressibility > 0 for the same component of another box vector, this might lead to spurious periodicity effects.");
 +            warning(wi,warn_buf);
 +          }
 +      }
 +  }
 +
 +  sfree(dumstr[0]);
 +  sfree(dumstr[1]);
 +}
 +
 +static int search_QMstring(char *s,int ng,const char *gn[])
 +{
 +  /* same as normal search_string, but this one searches QM strings */
 +  int i;
 +
 +  for(i=0; (i<ng); i++)
 +    if (gmx_strcasecmp(s,gn[i]) == 0)
 +      return i;
 +
 +  gmx_fatal(FARGS,"this QM method or basisset (%s) is not implemented\n!",s);
 +
 +  return -1;
 +
 +} /* search_QMstring */
 +
 +
 +int search_string(char *s,int ng,char *gn[])
 +{
 +  int i;
 +  
 +  for(i=0; (i<ng); i++)
 +  {
 +    if (gmx_strcasecmp(s,gn[i]) == 0)
 +    {
 +      return i;
 +    }
 +  }
 +    
 +  gmx_fatal(FARGS,
 +            "Group %s referenced in the .mdp file was not found in the index file.\n"
 +            "Group names must match either [moleculetype] names or custom index group\n"
 +            "names, in which case you must supply an index file to the '-n' option\n"
 +            "of grompp.",
 +            s);
 +  
 +  return -1;
 +}
 +
 +static gmx_bool do_numbering(int natoms,gmx_groups_t *groups,int ng,char *ptrs[],
 +                         t_blocka *block,char *gnames[],
 +                         int gtype,int restnm,
 +                         int grptp,gmx_bool bVerbose,
 +                         warninp_t wi)
 +{
 +    unsigned short *cbuf;
 +    t_grps *grps=&(groups->grps[gtype]);
 +    int    i,j,gid,aj,ognr,ntot=0;
 +    const char *title;
 +    gmx_bool   bRest;
 +    char   warn_buf[STRLEN];
 +
 +    if (debug)
 +    {
 +        fprintf(debug,"Starting numbering %d groups of type %d\n",ng,gtype);
 +    }
 +  
 +    title = gtypes[gtype];
 +    
 +    snew(cbuf,natoms);
 +    /* Mark all id's as not set */
 +    for(i=0; (i<natoms); i++)
 +    {
 +        cbuf[i] = NOGID;
 +    }
 +  
 +    snew(grps->nm_ind,ng+1); /* +1 for possible rest group */
 +    for(i=0; (i<ng); i++)
 +    {
 +        /* Lookup the group name in the block structure */
 +        gid = search_string(ptrs[i],block->nr,gnames);
 +        if ((grptp != egrptpONE) || (i == 0))
 +        {
 +            grps->nm_ind[grps->nr++]=gid;
 +        }
 +        if (debug) 
 +        {
 +            fprintf(debug,"Found gid %d for group %s\n",gid,ptrs[i]);
 +        }
 +    
 +        /* Now go over the atoms in the group */
 +        for(j=block->index[gid]; (j<block->index[gid+1]); j++)
 +        {
 +
 +            aj=block->a[j];
 +      
 +            /* Range checking */
 +            if ((aj < 0) || (aj >= natoms)) 
 +            {
 +                gmx_fatal(FARGS,"Invalid atom number %d in indexfile",aj);
 +            }
 +            /* Lookup up the old group number */
 +            ognr = cbuf[aj];
 +            if (ognr != NOGID)
 +            {
 +                gmx_fatal(FARGS,"Atom %d in multiple %s groups (%d and %d)",
 +                          aj+1,title,ognr+1,i+1);
 +            }
 +            else
 +            {
 +                /* Store the group number in buffer */
 +                if (grptp == egrptpONE)
 +                {
 +                    cbuf[aj] = 0;
 +                }
 +                else
 +                {
 +                    cbuf[aj] = i;
 +                }
 +                ntot++;
 +            }
 +        }
 +    }
 +    
 +    /* Now check whether we have done all atoms */
 +    bRest = FALSE;
 +    if (ntot != natoms)
 +    {
 +        if (grptp == egrptpALL)
 +        {
 +            gmx_fatal(FARGS,"%d atoms are not part of any of the %s groups",
 +                      natoms-ntot,title);
 +        }
 +        else if (grptp == egrptpPART)
 +        {
 +            sprintf(warn_buf,"%d atoms are not part of any of the %s groups",
 +                    natoms-ntot,title);
 +            warning_note(wi,warn_buf);
 +        }
 +        /* Assign all atoms currently unassigned to a rest group */
 +        for(j=0; (j<natoms); j++)
 +        {
 +            if (cbuf[j] == NOGID)
 +            {
 +                cbuf[j] = grps->nr;
 +                bRest = TRUE;
 +            }
 +        }
 +        if (grptp != egrptpPART)
 +        {
 +            if (bVerbose)
 +            {
 +                fprintf(stderr,
 +                        "Making dummy/rest group for %s containing %d elements\n",
 +                        title,natoms-ntot);
 +            }
 +            /* Add group name "rest" */ 
 +            grps->nm_ind[grps->nr] = restnm;
 +            
 +            /* Assign the rest name to all atoms not currently assigned to a group */
 +            for(j=0; (j<natoms); j++)
 +            {
 +                if (cbuf[j] == NOGID)
 +                {
 +                    cbuf[j] = grps->nr;
 +                }
 +            }
 +            grps->nr++;
 +        }
 +    }
 +    
 +    if (grps->nr == 1 && (ntot == 0 || ntot == natoms))
 +    {
 +        /* All atoms are part of one (or no) group, no index required */
 +        groups->ngrpnr[gtype] = 0;
 +        groups->grpnr[gtype]  = NULL;
 +    }
 +    else
 +    {
 +        groups->ngrpnr[gtype] = natoms;
 +        snew(groups->grpnr[gtype],natoms);
 +        for(j=0; (j<natoms); j++)
 +        {
 +            groups->grpnr[gtype][j] = cbuf[j];
 +        }
 +    }
 +    
 +    sfree(cbuf);
 +
 +    return (bRest && grptp == egrptpPART);
 +}
 +
 +static void calc_nrdf(gmx_mtop_t *mtop,t_inputrec *ir,char **gnames)
 +{
 +  t_grpopts *opts;
 +  gmx_groups_t *groups;
 +  t_pull  *pull;
 +  int     natoms,ai,aj,i,j,d,g,imin,jmin,nc;
 +  t_iatom *ia;
 +  int     *nrdf2,*na_vcm,na_tot;
 +  double  *nrdf_tc,*nrdf_vcm,nrdf_uc,n_sub=0;
 +  gmx_mtop_atomloop_all_t aloop;
 +  t_atom  *atom;
 +  int     mb,mol,ftype,as;
 +  gmx_molblock_t *molb;
 +  gmx_moltype_t *molt;
 +
 +  /* Calculate nrdf. 
 +   * First calc 3xnr-atoms for each group
 +   * then subtract half a degree of freedom for each constraint
 +   *
 +   * Only atoms and nuclei contribute to the degrees of freedom...
 +   */
 +
 +  opts = &ir->opts;
 +  
 +  groups = &mtop->groups;
 +  natoms = mtop->natoms;
 +
 +  /* Allocate one more for a possible rest group */
 +  /* We need to sum degrees of freedom into doubles,
 +   * since floats give too low nrdf's above 3 million atoms.
 +   */
 +  snew(nrdf_tc,groups->grps[egcTC].nr+1);
 +  snew(nrdf_vcm,groups->grps[egcVCM].nr+1);
 +  snew(na_vcm,groups->grps[egcVCM].nr+1);
 +  
 +  for(i=0; i<groups->grps[egcTC].nr; i++)
 +    nrdf_tc[i] = 0;
 +  for(i=0; i<groups->grps[egcVCM].nr+1; i++)
 +    nrdf_vcm[i] = 0;
 +
 +  snew(nrdf2,natoms);
 +  aloop = gmx_mtop_atomloop_all_init(mtop);
 +  while (gmx_mtop_atomloop_all_next(aloop,&i,&atom)) {
 +    nrdf2[i] = 0;
 +    if (atom->ptype == eptAtom || atom->ptype == eptNucleus) {
 +      g = ggrpnr(groups,egcFREEZE,i);
 +      /* Double count nrdf for particle i */
 +      for(d=0; d<DIM; d++) {
 +      if (opts->nFreeze[g][d] == 0) {
 +        nrdf2[i] += 2;
 +      }
 +      }
 +      nrdf_tc [ggrpnr(groups,egcTC ,i)] += 0.5*nrdf2[i];
 +      nrdf_vcm[ggrpnr(groups,egcVCM,i)] += 0.5*nrdf2[i];
 +    }
 +  }
 +
 +  as = 0;
 +  for(mb=0; mb<mtop->nmolblock; mb++) {
 +    molb = &mtop->molblock[mb];
 +    molt = &mtop->moltype[molb->type];
 +    atom = molt->atoms.atom;
 +    for(mol=0; mol<molb->nmol; mol++) {
 +      for (ftype=F_CONSTR; ftype<=F_CONSTRNC; ftype++) {
 +      ia = molt->ilist[ftype].iatoms;
 +      for(i=0; i<molt->ilist[ftype].nr; ) {
 +        /* Subtract degrees of freedom for the constraints,
 +         * if the particles still have degrees of freedom left.
 +         * If one of the particles is a vsite or a shell, then all
 +         * constraint motion will go there, but since they do not
 +         * contribute to the constraints the degrees of freedom do not
 +         * change.
 +         */
 +        ai = as + ia[1];
 +        aj = as + ia[2];
 +        if (((atom[ia[1]].ptype == eptNucleus) ||
 +             (atom[ia[1]].ptype == eptAtom)) &&
 +            ((atom[ia[2]].ptype == eptNucleus) ||
 +             (atom[ia[2]].ptype == eptAtom))) {
 +          if (nrdf2[ai] > 0) 
 +            jmin = 1;
 +          else
 +            jmin = 2;
 +          if (nrdf2[aj] > 0)
 +            imin = 1;
 +          else
 +            imin = 2;
 +          imin = min(imin,nrdf2[ai]);
 +          jmin = min(jmin,nrdf2[aj]);
 +          nrdf2[ai] -= imin;
 +          nrdf2[aj] -= jmin;
 +          nrdf_tc [ggrpnr(groups,egcTC ,ai)] -= 0.5*imin;
 +          nrdf_tc [ggrpnr(groups,egcTC ,aj)] -= 0.5*jmin;
 +          nrdf_vcm[ggrpnr(groups,egcVCM,ai)] -= 0.5*imin;
 +          nrdf_vcm[ggrpnr(groups,egcVCM,aj)] -= 0.5*jmin;
 +        }
 +        ia += interaction_function[ftype].nratoms+1;
 +        i  += interaction_function[ftype].nratoms+1;
 +      }
 +      }
 +      ia = molt->ilist[F_SETTLE].iatoms;
 +      for(i=0; i<molt->ilist[F_SETTLE].nr; ) {
 +      /* Subtract 1 dof from every atom in the SETTLE */
 +      for(j=0; j<3; j++) {
 +      ai = as + ia[1+j];
 +        imin = min(2,nrdf2[ai]);
 +        nrdf2[ai] -= imin;
 +        nrdf_tc [ggrpnr(groups,egcTC ,ai)] -= 0.5*imin;
 +        nrdf_vcm[ggrpnr(groups,egcVCM,ai)] -= 0.5*imin;
 +      }
 +      ia += 4;
 +      i  += 4;
 +      }
 +      as += molt->atoms.nr;
 +    }
 +  }
 +
 +  if (ir->ePull == epullCONSTRAINT) {
 +    /* Correct nrdf for the COM constraints.
 +     * We correct using the TC and VCM group of the first atom
 +     * in the reference and pull group. If atoms in one pull group
 +     * belong to different TC or VCM groups it is anyhow difficult
 +     * to determine the optimal nrdf assignment.
 +     */
 +    pull = ir->pull;
 +    if (pull->eGeom == epullgPOS) {
 +      nc = 0;
 +      for(i=0; i<DIM; i++) {
 +      if (pull->dim[i])
 +        nc++;
 +      }
 +    } else {
 +      nc = 1;
 +    }
 +    for(i=0; i<pull->ngrp; i++) {
 +      imin = 2*nc;
 +      if (pull->grp[0].nat > 0) {
 +      /* Subtract 1/2 dof from the reference group */
 +      ai = pull->grp[0].ind[0];
 +      if (nrdf_tc[ggrpnr(groups,egcTC,ai)] > 1) {
 +        nrdf_tc [ggrpnr(groups,egcTC ,ai)] -= 0.5;
 +        nrdf_vcm[ggrpnr(groups,egcVCM,ai)] -= 0.5;
 +        imin--;
 +      }
 +      }
 +      /* Subtract 1/2 dof from the pulled group */
 +      ai = pull->grp[1+i].ind[0];
 +      nrdf_tc [ggrpnr(groups,egcTC ,ai)] -= 0.5*imin;
 +      nrdf_vcm[ggrpnr(groups,egcVCM,ai)] -= 0.5*imin;
 +      if (nrdf_tc[ggrpnr(groups,egcTC,ai)] < 0)
 +      gmx_fatal(FARGS,"Center of mass pulling constraints caused the number of degrees of freedom for temperature coupling group %s to be negative",gnames[groups->grps[egcTC].nm_ind[ggrpnr(groups,egcTC,ai)]]);
 +    }
 +  }
 +  
 +  if (ir->nstcomm != 0) {
 +    /* Subtract 3 from the number of degrees of freedom in each vcm group
 +     * when com translation is removed and 6 when rotation is removed
 +     * as well.
 +     */
 +    switch (ir->comm_mode) {
 +    case ecmLINEAR:
 +      n_sub = ndof_com(ir);
 +      break;
 +    case ecmANGULAR:
 +      n_sub = 6;
 +      break;
 +    default:
 +      n_sub = 0;
 +      gmx_incons("Checking comm_mode");
 +    }
 +    
 +    for(i=0; i<groups->grps[egcTC].nr; i++) {
 +      /* Count the number of atoms of TC group i for every VCM group */
 +      for(j=0; j<groups->grps[egcVCM].nr+1; j++)
 +      na_vcm[j] = 0;
 +      na_tot = 0;
 +      for(ai=0; ai<natoms; ai++)
 +      if (ggrpnr(groups,egcTC,ai) == i) {
 +        na_vcm[ggrpnr(groups,egcVCM,ai)]++;
 +        na_tot++;
 +      }
 +      /* Correct for VCM removal according to the fraction of each VCM
 +       * group present in this TC group.
 +       */
 +      nrdf_uc = nrdf_tc[i];
 +      if (debug) {
 +      fprintf(debug,"T-group[%d] nrdf_uc = %g, n_sub = %g\n",
 +              i,nrdf_uc,n_sub);
 +      }
 +      nrdf_tc[i] = 0;
 +      for(j=0; j<groups->grps[egcVCM].nr+1; j++) {
 +      if (nrdf_vcm[j] > n_sub) {
 +        nrdf_tc[i] += nrdf_uc*((double)na_vcm[j]/(double)na_tot)*
 +          (nrdf_vcm[j] - n_sub)/nrdf_vcm[j];
 +      }
 +      if (debug) {
 +        fprintf(debug,"  nrdf_vcm[%d] = %g, nrdf = %g\n",
 +                j,nrdf_vcm[j],nrdf_tc[i]);
 +      }
 +      }
 +    }
 +  }
 +  for(i=0; (i<groups->grps[egcTC].nr); i++) {
 +    opts->nrdf[i] = nrdf_tc[i];
 +    if (opts->nrdf[i] < 0)
 +      opts->nrdf[i] = 0;
 +    fprintf(stderr,
 +          "Number of degrees of freedom in T-Coupling group %s is %.2f\n",
 +          gnames[groups->grps[egcTC].nm_ind[i]],opts->nrdf[i]);
 +  }
 +  
 +  sfree(nrdf2);
 +  sfree(nrdf_tc);
 +  sfree(nrdf_vcm);
 +  sfree(na_vcm);
 +}
 +
 +/* Parse an electric-field mdp string of the form
 + *   "n a1 phi1 a2 phi2 ... an phin"
 + * into *cosine: n cosine terms with amplitude a[i] and phase phi[i].
 + * cosine->a and cosine->phi are snew()-allocated when n > 0 and owned
 + * by the caller.  Calls gmx_fatal() on malformed input.
 + * NOTE(review): bTime is not referenced in this body — presumably kept
 + * for interface symmetry with time-dependent fields; confirm at callers.
 + */
 +static void decode_cos(char *s,t_cosines *cosine,gmx_bool bTime)
 +{
 +  char   *t;
 +  char   format[STRLEN],f1[STRLEN];
 +  double a,phi;
 +  int    i;
 +  
 +  t=strdup(s);
 +  trim(t);
 +  
 +  cosine->n=0;
 +  cosine->a=NULL;
 +  cosine->phi=NULL;
 +  if (strlen(t)) {
 +    sscanf(t,"%d",&(cosine->n));
 +    if (cosine->n <= 0) {
 +      cosine->n=0;
 +    } else {
 +      snew(cosine->a,cosine->n);
 +      snew(cosine->phi,cosine->n);
 +      
 +      /* Grow a scanf format that skips the count and the i leading
 +       * "a phi" pairs, so each iteration reads pair i from the start
 +       * of the string. */
 +      sprintf(format,"%%*d");
 +      for(i=0; (i<cosine->n); i++) {
 +      strcpy(f1,format);
 +      strcat(f1,"%lf%lf");
 +      if (sscanf(t,f1,&a,&phi) < 2)
 +        gmx_fatal(FARGS,"Invalid input for electric field shift: '%s'",t);
 +      cosine->a[i]=a;
 +      cosine->phi[i]=phi;
 +      strcat(format,"%*lf%*lf");
 +      }
 +    }
 +  }
 +  sfree(t);
 +}
 +
 +/* Parse an mdp value 'val' (for option 'option') containing pairs of
 + * energy-group names, and OR 'flag' into ir->opts.egp_flags for each
 + * pair, symmetrically (j,k) and (k,j).  Fatal on an odd number of
 + * names or on a name that is not an energy group.
 + * Returns TRUE when at least one pair flag was set.
 + */
 +static gmx_bool do_egp_flag(t_inputrec *ir,gmx_groups_t *groups,
 +                      const char *option,const char *val,int flag)
 +{
 +  /* The maximum number of energy group pairs would be MAXPTR*(MAXPTR+1)/2.
 +   * But since this is much larger than STRLEN, such a line can not be parsed.
 +   * The real maximum is the number of names that fit in a string: STRLEN/2.
 +   */
 +#define EGP_MAX (STRLEN/2)
 +  int  nelem,i,j,k,nr;
 +  char *names[EGP_MAX];
 +  char ***gnames;
 +  gmx_bool bSet;
 +
 +  gnames = groups->grpname;
 +
 +  nelem = str_nelem(val,EGP_MAX,names);
 +  if (nelem % 2 != 0)
 +    gmx_fatal(FARGS,"The number of groups for %s is odd",option);
 +  nr = groups->grps[egcENER].nr;
 +  bSet = FALSE;
 +  for(i=0; i<nelem/2; i++) {
 +    /* Case-insensitive lookup of both names of pair i among the
 +     * energy groups. */
 +    j = 0;
 +    while ((j < nr) &&
 +         gmx_strcasecmp(names[2*i],*(gnames[groups->grps[egcENER].nm_ind[j]])))
 +      j++;
 +    if (j == nr)
 +      gmx_fatal(FARGS,"%s in %s is not an energy group\n",
 +                names[2*i],option);
 +    k = 0;
 +    while ((k < nr) &&
 +         gmx_strcasecmp(names[2*i+1],*(gnames[groups->grps[egcENER].nm_ind[k]])))
 +      k++;
 +    if (k==nr)
 +      gmx_fatal(FARGS,"%s in %s is not an energy group\n",
 +            names[2*i+1],option);
 +    /* Both found (gmx_fatal above does not return otherwise). */
 +    if ((j < nr) && (k < nr)) {
 +      ir->opts.egp_flags[nr*j+k] |= flag;
 +      ir->opts.egp_flags[nr*k+j] |= flag;
 +      bSet = TRUE;
 +    }
 +  }
 +
 +  return bSet;
 +}
 +
 +void do_index(const char* mdparin, const char *ndx,
 +              gmx_mtop_t *mtop,
 +              gmx_bool bVerbose,
 +              t_inputrec *ir,rvec *v,
 +              warninp_t wi)
 +{
 +  t_blocka *grps;
 +  gmx_groups_t *groups;
 +  int     natoms;
 +  t_symtab *symtab;
 +  t_atoms atoms_all;
 +  char    warnbuf[STRLEN],**gnames;
 +  int     nr,ntcg,ntau_t,nref_t,nacc,nofg,nSA,nSA_points,nSA_time,nSA_temp;
 +  real    tau_min;
 +  int     nstcmin;
 +  int     nacg,nfreeze,nfrdim,nenergy,nvcm,nuser;
 +  char    *ptr1[MAXPTR],*ptr2[MAXPTR],*ptr3[MAXPTR];
 +  int     i,j,k,restnm;
 +  real    SAtime;
 +  gmx_bool    bExcl,bTable,bSetTCpar,bAnneal,bRest;
 +  int     nQMmethod,nQMbasis,nQMcharge,nQMmult,nbSH,nCASorb,nCASelec,
 +    nSAon,nSAoff,nSAsteps,nQMg,nbOPT,nbTS;
 +  char    warn_buf[STRLEN];
 +
 +  if (bVerbose)
 +    fprintf(stderr,"processing index file...\n");
 +  debug_gmx();
 +  if (ndx == NULL) {
 +    snew(grps,1);
 +    snew(grps->index,1);
 +    snew(gnames,1);
 +    atoms_all = gmx_mtop_global_atoms(mtop);
 +    analyse(&atoms_all,grps,&gnames,FALSE,TRUE);
 +    free_t_atoms(&atoms_all,FALSE);
 +  } else {
 +    grps = init_index(ndx,&gnames);
 +  }
 +
 +  groups = &mtop->groups;
 +  natoms = mtop->natoms;
 +  symtab = &mtop->symtab;
 +
 +  snew(groups->grpname,grps->nr+1);
 +  
 +  for(i=0; (i<grps->nr); i++) {
 +    groups->grpname[i] = put_symtab(symtab,gnames[i]);
 +  }
 +  groups->grpname[i] = put_symtab(symtab,"rest");
 +  restnm=i;
 +  srenew(gnames,grps->nr+1);
 +  gnames[restnm] = *(groups->grpname[i]);
 +  groups->ngrpname = grps->nr+1;
 +
 +  set_warning_line(wi,mdparin,-1);
 +
 +  ntau_t = str_nelem(tau_t,MAXPTR,ptr1);
 +  nref_t = str_nelem(ref_t,MAXPTR,ptr2);
 +  ntcg   = str_nelem(tcgrps,MAXPTR,ptr3);
 +  if ((ntau_t != ntcg) || (nref_t != ntcg)) {
 +    gmx_fatal(FARGS,"Invalid T coupling input: %d groups, %d ref-t values and "
 +                "%d tau-t values",ntcg,nref_t,ntau_t);
 +  }
 +
 +  bSetTCpar = (ir->etc || EI_SD(ir->eI) || ir->eI==eiBD || EI_TPI(ir->eI));
 +  do_numbering(natoms,groups,ntcg,ptr3,grps,gnames,egcTC,
 +               restnm,bSetTCpar ? egrptpALL : egrptpALL_GENREST,bVerbose,wi);
 +  nr = groups->grps[egcTC].nr;
 +  ir->opts.ngtc = nr;
 +  snew(ir->opts.nrdf,nr);
 +  snew(ir->opts.tau_t,nr);
 +  snew(ir->opts.ref_t,nr);
 +  if (ir->eI==eiBD && ir->bd_fric==0) {
 +    fprintf(stderr,"bd-fric=0, so tau-t will be used as the inverse friction constant(s)\n");
 +  }
 +
 +  if (bSetTCpar)
 +  {
 +      if (nr != nref_t)
 +      {
 +          gmx_fatal(FARGS,"Not enough ref-t and tau-t values!");
 +      }
 +      
 +      tau_min = 1e20;
 +      for(i=0; (i<nr); i++)
 +      {
 +          ir->opts.tau_t[i] = strtod(ptr1[i],NULL);
 +          if ((ir->eI == eiBD || ir->eI == eiSD2) && ir->opts.tau_t[i] <= 0)
 +          {
 +              sprintf(warn_buf,"With integrator %s tau-t should be larger than 0",ei_names[ir->eI]);
 +              warning_error(wi,warn_buf);
 +          }
 +
 +          if (ir->etc != etcVRESCALE && ir->opts.tau_t[i] == 0)
 +          {
 +              warning_note(wi,"tau-t = -1 is the value to signal that a group should not have temperature coupling. Treating your use of tau-t = 0 as if you used -1.");
 +          }
 +
 +          if (ir->opts.tau_t[i] >= 0)
 +          {
 +              tau_min = min(tau_min,ir->opts.tau_t[i]);
 +          }
 +      }
 +      if (ir->etc != etcNO && ir->nsttcouple == -1)
 +      {
 +            ir->nsttcouple = ir_optimal_nsttcouple(ir);
 +      }
 +
 +      if (EI_VV(ir->eI)) 
 +      {
 +          if ((ir->etc==etcNOSEHOOVER) && (ir->epc==epcBERENDSEN)) {
 +              gmx_fatal(FARGS,"Cannot do Nose-Hoover temperature with Berendsen pressure control with md-vv; use either vrescale temperature with berendsen pressure or Nose-Hoover temperature with MTTK pressure");
 +          }
 +          if ((ir->epc==epcMTTK) && (ir->etc>etcNO))
 +          {
 +              int mincouple;
 +              mincouple = ir->nsttcouple;
 +              if (ir->nstpcouple < mincouple)
 +              {
 +                  mincouple = ir->nstpcouple;
 +              }
 +              ir->nstpcouple = mincouple;
 +              ir->nsttcouple = mincouple;
 +              sprintf(warn_buf,"for current Trotter decomposition methods with vv, nsttcouple and nstpcouple must be equal.  Both have been reset to min(nsttcouple,nstpcouple) = %d",mincouple);
 +              warning_note(wi,warn_buf);
 +          }
 +      }
 +      /* velocity verlet with averaged kinetic energy KE = 0.5*(v(t+1/2) - v(t-1/2)) is implemented
 +         primarily for testing purposes, and does not work with temperature coupling other than 1 */
 +
 +      if (ETC_ANDERSEN(ir->etc)) {
 +          if (ir->nsttcouple != 1) {
 +              ir->nsttcouple = 1;
 +              sprintf(warn_buf,"Andersen temperature control methods assume nsttcouple = 1; there is no need for larger nsttcouple > 1, since no global parameters are computed. nsttcouple has been reset to 1");
 +              warning_note(wi,warn_buf);
 +          }
 +      }
 +      nstcmin = tcouple_min_integration_steps(ir->etc);
 +      if (nstcmin > 1)
 +      {
 +          if (tau_min/(ir->delta_t*ir->nsttcouple) < nstcmin)
 +          {
 +              sprintf(warn_buf,"For proper integration of the %s thermostat, tau-t (%g) should be at least %d times larger than nsttcouple*dt (%g)",
 +                      ETCOUPLTYPE(ir->etc),
 +                      tau_min,nstcmin,
 +                      ir->nsttcouple*ir->delta_t);
 +              warning(wi,warn_buf);
 +          }
 +      }
 +      for(i=0; (i<nr); i++)
 +      {
 +          ir->opts.ref_t[i] = strtod(ptr2[i],NULL);
 +          if (ir->opts.ref_t[i] < 0)
 +          {
 +              gmx_fatal(FARGS,"ref-t for group %d negative",i);
 +          }
 +      }
 +      /* set the lambda mc temperature to the md integrator temperature (which should be defined
 +         if we are in this conditional) if mc_temp is negative */
 +      if (ir->expandedvals->mc_temp < 0)
 +      {
 +          ir->expandedvals->mc_temp = ir->opts.ref_t[0];  /*for now, set to the first reft */
 +      }
 +  }
 +
 +  /* Simulated annealing for each group. There are nr groups */
 +  nSA = str_nelem(anneal,MAXPTR,ptr1);
 +  if (nSA == 1 && (ptr1[0][0]=='n' || ptr1[0][0]=='N'))
 +     nSA = 0;
 +  if(nSA>0 && nSA != nr) 
 +    gmx_fatal(FARGS,"Not enough annealing values: %d (for %d groups)\n",nSA,nr);
 +  else {
 +    snew(ir->opts.annealing,nr);
 +    snew(ir->opts.anneal_npoints,nr);
 +    snew(ir->opts.anneal_time,nr);
 +    snew(ir->opts.anneal_temp,nr);
 +    for(i=0;i<nr;i++) {
 +      ir->opts.annealing[i]=eannNO;
 +      ir->opts.anneal_npoints[i]=0;
 +      ir->opts.anneal_time[i]=NULL;
 +      ir->opts.anneal_temp[i]=NULL;
 +    }
 +    if (nSA > 0) {
 +      bAnneal=FALSE;
 +      for(i=0;i<nr;i++) { 
 +      if(ptr1[i][0]=='n' || ptr1[i][0]=='N') {
 +        ir->opts.annealing[i]=eannNO;
 +      } else if(ptr1[i][0]=='s'|| ptr1[i][0]=='S') {
 +        ir->opts.annealing[i]=eannSINGLE;
 +        bAnneal=TRUE;
 +      } else if(ptr1[i][0]=='p'|| ptr1[i][0]=='P') {
 +        ir->opts.annealing[i]=eannPERIODIC;
 +        bAnneal=TRUE;
 +      } 
 +      } 
 +      if(bAnneal) {
 +      /* Read the other fields too */
 +      nSA_points = str_nelem(anneal_npoints,MAXPTR,ptr1);
 +      if(nSA_points!=nSA) 
 +          gmx_fatal(FARGS,"Found %d annealing-npoints values for %d groups\n",nSA_points,nSA);
 +      for(k=0,i=0;i<nr;i++) {
 +        ir->opts.anneal_npoints[i]=strtol(ptr1[i],NULL,10);
 +        if(ir->opts.anneal_npoints[i]==1)
 +          gmx_fatal(FARGS,"Please specify at least a start and an end point for annealing\n");
 +        snew(ir->opts.anneal_time[i],ir->opts.anneal_npoints[i]);
 +        snew(ir->opts.anneal_temp[i],ir->opts.anneal_npoints[i]);
 +        k += ir->opts.anneal_npoints[i];
 +      }
 +
 +      nSA_time = str_nelem(anneal_time,MAXPTR,ptr1);
 +      if(nSA_time!=k) 
 +          gmx_fatal(FARGS,"Found %d annealing-time values, wanter %d\n",nSA_time,k);
 +      nSA_temp = str_nelem(anneal_temp,MAXPTR,ptr2);
 +      if(nSA_temp!=k) 
 +          gmx_fatal(FARGS,"Found %d annealing-temp values, wanted %d\n",nSA_temp,k);
 +
 +      for(i=0,k=0;i<nr;i++) {
 +        
 +        for(j=0;j<ir->opts.anneal_npoints[i];j++) {
 +          ir->opts.anneal_time[i][j]=strtod(ptr1[k],NULL);
 +          ir->opts.anneal_temp[i][j]=strtod(ptr2[k],NULL);
 +          if(j==0) {
 +            if(ir->opts.anneal_time[i][0] > (ir->init_t+GMX_REAL_EPS))
 +              gmx_fatal(FARGS,"First time point for annealing > init_t.\n");      
 +          } else { 
 +            /* j>0 */
 +            if(ir->opts.anneal_time[i][j]<ir->opts.anneal_time[i][j-1])
 +              gmx_fatal(FARGS,"Annealing timepoints out of order: t=%f comes after t=%f\n",
 +                          ir->opts.anneal_time[i][j],ir->opts.anneal_time[i][j-1]);
 +          }
 +          if(ir->opts.anneal_temp[i][j]<0) 
 +            gmx_fatal(FARGS,"Found negative temperature in annealing: %f\n",ir->opts.anneal_temp[i][j]);    
 +          k++;
 +        }
 +      }
 +      /* Print out some summary information, to make sure we got it right */
 +      for(i=0,k=0;i<nr;i++) {
 +        if(ir->opts.annealing[i]!=eannNO) {
 +          j = groups->grps[egcTC].nm_ind[i];
 +          fprintf(stderr,"Simulated annealing for group %s: %s, %d timepoints\n",
 +                  *(groups->grpname[j]),eann_names[ir->opts.annealing[i]],
 +                  ir->opts.anneal_npoints[i]);
 +          fprintf(stderr,"Time (ps)   Temperature (K)\n");
 +          /* All terms except the last one */
 +          for(j=0;j<(ir->opts.anneal_npoints[i]-1);j++) 
 +              fprintf(stderr,"%9.1f      %5.1f\n",ir->opts.anneal_time[i][j],ir->opts.anneal_temp[i][j]);
 +          
 +          /* Finally the last one */
 +          j = ir->opts.anneal_npoints[i]-1;
 +          if(ir->opts.annealing[i]==eannSINGLE)
 +            fprintf(stderr,"%9.1f-     %5.1f\n",ir->opts.anneal_time[i][j],ir->opts.anneal_temp[i][j]);
 +          else {
 +            fprintf(stderr,"%9.1f      %5.1f\n",ir->opts.anneal_time[i][j],ir->opts.anneal_temp[i][j]);
 +            if(fabs(ir->opts.anneal_temp[i][j]-ir->opts.anneal_temp[i][0])>GMX_REAL_EPS)
 +              warning_note(wi,"There is a temperature jump when your annealing loops back.\n");
 +          }
 +        }
 +      } 
 +      }
 +    }
 +  }   
 +
 +  if (ir->ePull != epullNO) {
 +    make_pull_groups(ir->pull,pull_grp,grps,gnames);
 +  }
 +  
 +  if (ir->bRot) {
 +    make_rotation_groups(ir->rot,rot_grp,grps,gnames);
 +  }
 +
 +  nacc = str_nelem(acc,MAXPTR,ptr1);
 +  nacg = str_nelem(accgrps,MAXPTR,ptr2);
 +  if (nacg*DIM != nacc)
 +    gmx_fatal(FARGS,"Invalid Acceleration input: %d groups and %d acc. values",
 +              nacg,nacc);
 +  do_numbering(natoms,groups,nacg,ptr2,grps,gnames,egcACC,
 +               restnm,egrptpALL_GENREST,bVerbose,wi);
 +  nr = groups->grps[egcACC].nr;
 +  snew(ir->opts.acc,nr);
 +  ir->opts.ngacc=nr;
 +  
 +  for(i=k=0; (i<nacg); i++)
 +    for(j=0; (j<DIM); j++,k++)
 +      ir->opts.acc[i][j]=strtod(ptr1[k],NULL);
 +  for( ;(i<nr); i++)
 +    for(j=0; (j<DIM); j++)
 +      ir->opts.acc[i][j]=0;
 +  
 +  nfrdim  = str_nelem(frdim,MAXPTR,ptr1);
 +  nfreeze = str_nelem(freeze,MAXPTR,ptr2);
 +  if (nfrdim != DIM*nfreeze)
 +    gmx_fatal(FARGS,"Invalid Freezing input: %d groups and %d freeze values",
 +              nfreeze,nfrdim);
 +  do_numbering(natoms,groups,nfreeze,ptr2,grps,gnames,egcFREEZE,
 +               restnm,egrptpALL_GENREST,bVerbose,wi);
 +  nr = groups->grps[egcFREEZE].nr;
 +  ir->opts.ngfrz=nr;
 +  snew(ir->opts.nFreeze,nr);
 +  for(i=k=0; (i<nfreeze); i++)
 +    for(j=0; (j<DIM); j++,k++) {
 +      ir->opts.nFreeze[i][j]=(gmx_strncasecmp(ptr1[k],"Y",1)==0);
 +      if (!ir->opts.nFreeze[i][j]) {
 +      if (gmx_strncasecmp(ptr1[k],"N",1) != 0) {
 +        sprintf(warnbuf,"Please use Y(ES) or N(O) for freezedim only "
 +                "(not %s)", ptr1[k]);
 +        warning(wi,warn_buf);
 +      }
 +      }
 +    }
 +  for( ; (i<nr); i++)
 +    for(j=0; (j<DIM); j++)
 +      ir->opts.nFreeze[i][j]=0;
 +  
 +  nenergy=str_nelem(energy,MAXPTR,ptr1);
 +  do_numbering(natoms,groups,nenergy,ptr1,grps,gnames,egcENER,
 +               restnm,egrptpALL_GENREST,bVerbose,wi);
 +  add_wall_energrps(groups,ir->nwall,symtab);
 +  ir->opts.ngener = groups->grps[egcENER].nr;
 +  nvcm=str_nelem(vcm,MAXPTR,ptr1);
 +  bRest =
 +    do_numbering(natoms,groups,nvcm,ptr1,grps,gnames,egcVCM,
 +                 restnm,nvcm==0 ? egrptpALL_GENREST : egrptpPART,bVerbose,wi);
 +  if (bRest) {
 +    warning(wi,"Some atoms are not part of any center of mass motion removal group.\n"
 +          "This may lead to artifacts.\n"
 +          "In most cases one should use one group for the whole system.");
 +  }
 +
 +  /* Now we have filled the freeze struct, so we can calculate NRDF */ 
 +  calc_nrdf(mtop,ir,gnames);
 +
 +  if (v && NULL) {
 +    real fac,ntot=0;
 +    
 +    /* Must check per group! */
 +    for(i=0; (i<ir->opts.ngtc); i++) 
 +      ntot += ir->opts.nrdf[i];
 +    if (ntot != (DIM*natoms)) {
 +      fac = sqrt(ntot/(DIM*natoms));
 +      if (bVerbose)
 +      fprintf(stderr,"Scaling velocities by a factor of %.3f to account for constraints\n"
 +              "and removal of center of mass motion\n",fac);
 +      for(i=0; (i<natoms); i++)
 +      svmul(fac,v[i],v[i]);
 +    }
 +  }
 +  
 +  nuser=str_nelem(user1,MAXPTR,ptr1);
 +  do_numbering(natoms,groups,nuser,ptr1,grps,gnames,egcUser1,
 +               restnm,egrptpALL_GENREST,bVerbose,wi);
 +  nuser=str_nelem(user2,MAXPTR,ptr1);
 +  do_numbering(natoms,groups,nuser,ptr1,grps,gnames,egcUser2,
 +               restnm,egrptpALL_GENREST,bVerbose,wi);
 +  nuser=str_nelem(xtc_grps,MAXPTR,ptr1);
 +  do_numbering(natoms,groups,nuser,ptr1,grps,gnames,egcXTC,
 +               restnm,egrptpONE,bVerbose,wi);
 +  nofg = str_nelem(orirefitgrp,MAXPTR,ptr1);
 +  do_numbering(natoms,groups,nofg,ptr1,grps,gnames,egcORFIT,
 +               restnm,egrptpALL_GENREST,bVerbose,wi);
 +
 +  /* QMMM input processing */
 +  nQMg          = str_nelem(QMMM,MAXPTR,ptr1);
 +  nQMmethod     = str_nelem(QMmethod,MAXPTR,ptr2);
 +  nQMbasis      = str_nelem(QMbasis,MAXPTR,ptr3);
 +  if((nQMmethod != nQMg)||(nQMbasis != nQMg)){
 +    gmx_fatal(FARGS,"Invalid QMMM input: %d groups %d basissets"
 +            " and %d methods\n",nQMg,nQMbasis,nQMmethod);
 +  }
 +  /* group rest, if any, is always MM! */
 +  do_numbering(natoms,groups,nQMg,ptr1,grps,gnames,egcQMMM,
 +               restnm,egrptpALL_GENREST,bVerbose,wi);
 +  nr = nQMg; /*atoms->grps[egcQMMM].nr;*/
 +  ir->opts.ngQM = nQMg;
 +  snew(ir->opts.QMmethod,nr);
 +  snew(ir->opts.QMbasis,nr);
 +  for(i=0;i<nr;i++){
 +    /* input consists of strings: RHF CASSCF PM3 .. These need to be
 +     * converted to the corresponding enum in names.c
 +     */
 +    ir->opts.QMmethod[i] = search_QMstring(ptr2[i],eQMmethodNR,
 +                                           eQMmethod_names);
 +    ir->opts.QMbasis[i]  = search_QMstring(ptr3[i],eQMbasisNR,
 +                                           eQMbasis_names);
 +
 +  }
 +  nQMmult   = str_nelem(QMmult,MAXPTR,ptr1);
 +  nQMcharge = str_nelem(QMcharge,MAXPTR,ptr2);
 +  nbSH      = str_nelem(bSH,MAXPTR,ptr3);
 +  snew(ir->opts.QMmult,nr);
 +  snew(ir->opts.QMcharge,nr);
 +  snew(ir->opts.bSH,nr);
 +
 +  for(i=0;i<nr;i++){
 +    ir->opts.QMmult[i]   = strtol(ptr1[i],NULL,10);
 +    ir->opts.QMcharge[i] = strtol(ptr2[i],NULL,10);
 +    ir->opts.bSH[i]      = (gmx_strncasecmp(ptr3[i],"Y",1)==0);
 +  }
 +
 +  nCASelec  = str_nelem(CASelectrons,MAXPTR,ptr1);
 +  nCASorb   = str_nelem(CASorbitals,MAXPTR,ptr2);
 +  snew(ir->opts.CASelectrons,nr);
 +  snew(ir->opts.CASorbitals,nr);
 +  for(i=0;i<nr;i++){
 +    ir->opts.CASelectrons[i]= strtol(ptr1[i],NULL,10);
 +    ir->opts.CASorbitals[i] = strtol(ptr2[i],NULL,10);
 +  }
 +  /* special optimization options */
 +
 +  nbOPT = str_nelem(bOPT,MAXPTR,ptr1);
 +  nbTS = str_nelem(bTS,MAXPTR,ptr2);
 +  snew(ir->opts.bOPT,nr);
 +  snew(ir->opts.bTS,nr);
 +  for(i=0;i<nr;i++){
 +    ir->opts.bOPT[i] = (gmx_strncasecmp(ptr1[i],"Y",1)==0);
 +    ir->opts.bTS[i]  = (gmx_strncasecmp(ptr2[i],"Y",1)==0);
 +  }
 +  nSAon     = str_nelem(SAon,MAXPTR,ptr1);
 +  nSAoff    = str_nelem(SAoff,MAXPTR,ptr2);
 +  nSAsteps  = str_nelem(SAsteps,MAXPTR,ptr3);
 +  snew(ir->opts.SAon,nr);
 +  snew(ir->opts.SAoff,nr);
 +  snew(ir->opts.SAsteps,nr);
 +
 +  for(i=0;i<nr;i++){
 +    ir->opts.SAon[i]    = strtod(ptr1[i],NULL);
 +    ir->opts.SAoff[i]   = strtod(ptr2[i],NULL);
 +    ir->opts.SAsteps[i] = strtol(ptr3[i],NULL,10);
 +  }
 +  /* end of QMMM input */
 +
 +  if (bVerbose)
 +    for(i=0; (i<egcNR); i++) {
 +      fprintf(stderr,"%-16s has %d element(s):",gtypes[i],groups->grps[i].nr); 
 +      for(j=0; (j<groups->grps[i].nr); j++)
 +      fprintf(stderr," %s",*(groups->grpname[groups->grps[i].nm_ind[j]]));
 +      fprintf(stderr,"\n");
 +    }
 +
 +  nr = groups->grps[egcENER].nr;
 +  snew(ir->opts.egp_flags,nr*nr);
 +
 +  bExcl = do_egp_flag(ir,groups,"energygrp-excl",egpexcl,EGP_EXCL);
 +    if (bExcl && ir->cutoff_scheme == ecutsVERLET) 
 +    {
 +        warning_error(wi,"Energy group exclusions are not (yet) implemented for the Verlet scheme");
 +    } 
 +  if (bExcl && EEL_FULL(ir->coulombtype))
 +    warning(wi,"Can not exclude the lattice Coulomb energy between energy groups");
 +
 +  bTable = do_egp_flag(ir,groups,"energygrp-table",egptable,EGP_TABLE);
 +  if (bTable && !(ir->vdwtype == evdwUSER) && 
 +      !(ir->coulombtype == eelUSER) && !(ir->coulombtype == eelPMEUSER) &&
 +      !(ir->coulombtype == eelPMEUSERSWITCH))
 +    gmx_fatal(FARGS,"Can only have energy group pair tables in combination with user tables for VdW and/or Coulomb");
 +
 +  decode_cos(efield_x,&(ir->ex[XX]),FALSE);
 +  decode_cos(efield_xt,&(ir->et[XX]),TRUE);
 +  decode_cos(efield_y,&(ir->ex[YY]),FALSE);
 +  decode_cos(efield_yt,&(ir->et[YY]),TRUE);
 +  decode_cos(efield_z,&(ir->ex[ZZ]),FALSE);
 +  decode_cos(efield_zt,&(ir->et[ZZ]),TRUE);
 +
 +  if (ir->bAdress)
 +    do_adress_index(ir->adress,groups,gnames,&(ir->opts),wi);
 +
 +  for(i=0; (i<grps->nr); i++)
 +    sfree(gnames[i]);
 +  sfree(gnames);
 +  done_blocka(grps);
 +  sfree(grps);
 +
 +}
 +
 +
 +
 +/* Sanity-check distance restraints: every restraint label should map to
 + * a single parameter type; when the pairs of one restraint have
 + * non-identical parameters they end up as separate F_DISRES types with
 + * the same label, which this detects and reports as fatal.
 + * NOTE(review): only *consecutive* duplicate labels are detected —
 + * assumes types with the same label are stored adjacently in ffparams.
 + */
 +static void check_disre(gmx_mtop_t *mtop)
 +{
 +  gmx_ffparams_t *ffparams;
 +  t_functype *functype;
 +  t_iparams  *ip;
 +  int i,ndouble,ftype;
 +  int label,old_label;
 +  
 +  if (gmx_mtop_ftype_count(mtop,F_DISRES) > 0) {
 +    ffparams  = &mtop->ffparams;
 +    functype  = ffparams->functype;
 +    ip        = ffparams->iparams;
 +    ndouble   = 0;
 +    old_label = -1;
 +    for(i=0; i<ffparams->ntypes; i++) {
 +      ftype = functype[i];
 +      if (ftype == F_DISRES) {
 +      label = ip[i].disres.label;
 +      if (label == old_label) {
 +        fprintf(stderr,"Distance restraint index %d occurs twice\n",label);
 +        ndouble++;
 +      }
 +      old_label = label;
 +      }
 +    }
 +    if (ndouble>0)
 +      gmx_fatal(FARGS,"Found %d double distance restraint indices,\n"
 +              "probably the parameters for multiple pairs in one restraint "
 +              "are not identical\n",ndouble);
 +  }
 +}
 +
 +/* Determine for each Cartesian dimension whether the system has an
 + * absolute position reference: COM-motion removal in fewer dimensions
 + * than DIM, freeze groups, position restraints, or flat-bottom position
 + * restraints (unless posres_only, in which case only the restraints are
 + * considered).  AbsRef[d] is set to 1 when dimension d is referenced.
 + * Returns TRUE only when all three dimensions have a reference.
 + */
 +static gmx_bool absolute_reference(t_inputrec *ir,gmx_mtop_t *sys,
 +                                   gmx_bool posres_only,
 +                                   ivec AbsRef)
 +{
 +    int d,g,i;
 +    gmx_mtop_ilistloop_t iloop;
 +    t_ilist *ilist;
 +    int nmol;
 +    t_iparams *pr;
 +
 +    clear_ivec(AbsRef);
 +
 +    if (!posres_only)
 +    {
 +        /* Check the COM */
 +        for(d=0; d<DIM; d++)
 +        {
 +            AbsRef[d] = (d < ndof_com(ir) ? 0 : 1);
 +        }
 +        /* Check for freeze groups */
 +        for(g=0; g<ir->opts.ngfrz; g++)
 +        {
 +            for(d=0; d<DIM; d++)
 +            {
 +                if (ir->opts.nFreeze[g][d] != 0)
 +                {
 +                    AbsRef[d] = 1;
 +                }
 +            }
 +        }
 +    }
 +
 +    /* Check for position restraints */
 +    iloop = gmx_mtop_ilistloop_init(sys);
 +    while (gmx_mtop_ilistloop_next(iloop,&ilist,&nmol))
 +    {
 +        /* Skip the scan once every dimension already has a reference. */
 +        if (nmol > 0 &&
 +            (AbsRef[XX] == 0 || AbsRef[YY] == 0 || AbsRef[ZZ] == 0))
 +        {
 +            for(i=0; i<ilist[F_POSRES].nr; i+=2)
 +            {
 +                pr = &sys->ffparams.iparams[ilist[F_POSRES].iatoms[i]];
 +                for(d=0; d<DIM; d++)
 +                {
 +                    if (pr->posres.fcA[d] != 0)
 +                    {
 +                        AbsRef[d] = 1;
 +                    }
 +                }
 +            }
 +            for(i=0; i<ilist[F_FBPOSRES].nr; i+=2)
 +            {
 +                /* Check for flat-bottom posres */
 +                pr = &sys->ffparams.iparams[ilist[F_FBPOSRES].iatoms[i]];
 +                if (pr->fbposres.k != 0)
 +                {
 +                    switch(pr->fbposres.geom)
 +                    {
 +                    case efbposresSPHERE:
 +                        AbsRef[XX] = AbsRef[YY] = AbsRef[ZZ] = 1;
 +                        break;
 +                    case efbposresCYLINDER:
 +                        AbsRef[XX] = AbsRef[YY] = 1;
 +                        break;
 +                    case efbposresX: /* d=XX */
 +                    case efbposresY: /* d=YY */
 +                    case efbposresZ: /* d=ZZ */
 +                        d = pr->fbposres.geom - efbposresX;
 +                        AbsRef[d] = 1;
 +                        break;
 +                    default:
 +                        gmx_fatal(FARGS," Invalid geometry for flat-bottom position restraint.\n"
 +                                  "Expected nr between 1 and %d. Found %d\n", efbposresNR-1,
 +                                  pr->fbposres.geom);
 +                    }
 +                }
 +            }
 +        }
 +    }
 +
 +    return (AbsRef[XX] != 0 && AbsRef[YY] != 0 && AbsRef[ZZ] != 0);
 +}
 +
 +/* Cross-checks on the combination of input record and topology that can
 + * only be performed once both are known: COM-removal vs. absolute
 + * references, pressure coupling with absolute posres, charged system
 + * vs. electrostatics choice, GRF temperature requirements, SD1 with
 + * constraints, net acceleration, soft-core repulsion power, pulling
 + * with dynamic boxes, and duplicated distance restraints.
 + * Issues warnings/errors through 'wi'; some checks are fatal.
 + */
 +void triple_check(const char *mdparin,t_inputrec *ir,gmx_mtop_t *sys,
 +                  warninp_t wi)
 +{
 +  char err_buf[256];
 +  int  i,m,g,nmol,npct;
 +  gmx_bool bCharge,bAcc;
 +  real gdt_max,*mgrp,mt;
 +  rvec acc;
 +  gmx_mtop_atomloop_block_t aloopb;
 +  gmx_mtop_atomloop_all_t aloop;
 +  t_atom *atom;
 +  ivec AbsRef;
 +  char warn_buf[STRLEN];
 +
 +  set_warning_line(wi,mdparin,-1);
 +
 +  if (EI_DYNAMICS(ir->eI) && !EI_SD(ir->eI) && ir->eI != eiBD &&
 +      ir->comm_mode == ecmNO &&
 +      !(absolute_reference(ir,sys,FALSE,AbsRef) || ir->nsteps <= 10)) {
 +    warning(wi,"You are not using center of mass motion removal (mdp option comm-mode), numerical rounding errors can lead to build up of kinetic energy of the center of mass");
 +  }
 +
 +    /* Check for pressure coupling with absolute position restraints */
 +    if (ir->epc != epcNO && ir->refcoord_scaling == erscNO)
 +    {
 +        absolute_reference(ir,sys,TRUE,AbsRef);
 +        /* NOTE(review): the braces below form a redundant bare block;
 +         * harmless, kept as-is. */
 +        {
 +            for(m=0; m<DIM; m++)
 +            {
 +                if (AbsRef[m] && norm2(ir->compress[m]) > 0)
 +                {
 +                    warning(wi,"You are using pressure coupling with absolute position restraints, this will give artifacts. Use the refcoord_scaling option.");
 +                    break;
 +                }
 +            }
 +        }
 +    }
 +
 +  /* Does any atom carry a (state A or B) charge? */
 +  bCharge = FALSE;
 +  aloopb = gmx_mtop_atomloop_block_init(sys);
 +  while (gmx_mtop_atomloop_block_next(aloopb,&atom,&nmol)) {
 +    if (atom->q != 0 || atom->qB != 0) {
 +      bCharge = TRUE;
 +    }
 +  }
 +  
 +  if (!bCharge) {
 +    if (EEL_FULL(ir->coulombtype)) {
 +      sprintf(err_buf,
 +            "You are using full electrostatics treatment %s for a system without charges.\n"
 +            "This costs a lot of performance for just processing zeros, consider using %s instead.\n",
 +            EELTYPE(ir->coulombtype),EELTYPE(eelCUT));
 +      warning(wi,err_buf);
 +    }
 +  } else {
 +    if (ir->coulombtype == eelCUT && ir->rcoulomb > 0 && !ir->implicit_solvent) {
 +      sprintf(err_buf,
 +            "You are using a plain Coulomb cut-off, which might produce artifacts.\n"
 +            "You might want to consider using %s electrostatics.\n",
 +            EELTYPE(eelPME));
 +      warning_note(wi,err_buf);
 +    }
 +  }
 +
 +  /* Generalized reaction field */  
 +  if (ir->opts.ngtc == 0) {
 +    sprintf(err_buf,"No temperature coupling while using coulombtype %s",
 +          eel_names[eelGRF]);
 +    CHECK(ir->coulombtype == eelGRF);
 +  }
 +  else {
 +    sprintf(err_buf,"When using coulombtype = %s"
 +          " ref-t for temperature coupling should be > 0",
 +          eel_names[eelGRF]);
 +    CHECK((ir->coulombtype == eelGRF) && (ir->opts.ref_t[0] <= 0));
 +  }
 +
 +    if (ir->eI == eiSD1 &&
 +        (gmx_mtop_ftype_count(sys,F_CONSTR) > 0 ||
 +         gmx_mtop_ftype_count(sys,F_SETTLE) > 0))
 +    {
 +        sprintf(warn_buf,"With constraints integrator %s is less accurate, consider using %s instead",ei_names[ir->eI],ei_names[eiSD2]);
 +        warning_note(wi,warn_buf);
 +    }
 +    
 +  /* Net acceleration check: compute the mass-weighted total acceleration
 +   * per dimension and, when COM removal is active in that dimension,
 +   * subtract the net component from every group. */
 +  bAcc = FALSE;
 +  for(i=0; (i<sys->groups.grps[egcACC].nr); i++) {
 +    for(m=0; (m<DIM); m++) {
 +      if (fabs(ir->opts.acc[i][m]) > 1e-6) {
 +      bAcc = TRUE;
 +      }
 +    }
 +  }
 +  if (bAcc) {
 +    clear_rvec(acc);
 +    snew(mgrp,sys->groups.grps[egcACC].nr);
 +    aloop = gmx_mtop_atomloop_all_init(sys);
 +    while (gmx_mtop_atomloop_all_next(aloop,&i,&atom)) {
 +      mgrp[ggrpnr(&sys->groups,egcACC,i)] += atom->m;
 +    }
 +    mt = 0.0;
 +    for(i=0; (i<sys->groups.grps[egcACC].nr); i++) {
 +      for(m=0; (m<DIM); m++)
 +      acc[m] += ir->opts.acc[i][m]*mgrp[i];
 +      mt += mgrp[i];
 +    }
 +    for(m=0; (m<DIM); m++) {
 +      if (fabs(acc[m]) > 1e-6) {
 +      const char *dim[DIM] = { "X", "Y", "Z" };
 +      fprintf(stderr,
 +              "Net Acceleration in %s direction, will %s be corrected\n",
 +              dim[m],ir->nstcomm != 0 ? "" : "not");
 +      if (ir->nstcomm != 0 && m < ndof_com(ir)) {
 +        acc[m] /= mt;
 +        for (i=0; (i<sys->groups.grps[egcACC].nr); i++)
 +          ir->opts.acc[i][m] -= acc[m];
 +      }
 +      }
 +    }
 +    sfree(mgrp);
 +  }
 +
 +  if (ir->efep != efepNO && ir->fepvals->sc_alpha != 0 &&
 +      !gmx_within_tol(sys->ffparams.reppow,12.0,10*GMX_DOUBLE_EPS)) {
 +    gmx_fatal(FARGS,"Soft-core interactions are only supported with VdW repulsion power 12");
 +  }
 +
 +  if (ir->ePull != epullNO) {
 +    /* Pulling against an absolute reference (empty reference group)
 +     * requires the rest of the system to have one too. */
 +    if (ir->pull->grp[0].nat == 0) {
 +        absolute_reference(ir,sys,FALSE,AbsRef);
 +      for(m=0; m<DIM; m++) {
 +      if (ir->pull->dim[m] && !AbsRef[m]) {
 +        warning(wi,"You are using an absolute reference for pulling, but the rest of the system does not have an absolute reference. This will lead to artifacts.");
 +        break;
 +      }
 +      }
 +    }
 +
 +    if (ir->pull->eGeom == epullgDIRPBC) {
 +      for(i=0; i<3; i++) {
 +      for(m=0; m<=i; m++) {
 +        if ((ir->epc != epcNO && ir->compress[i][m] != 0) ||
 +            ir->deform[i][m] != 0) {
 +          for(g=1; g<ir->pull->ngrp; g++) {
 +            if (ir->pull->grp[g].vec[m] != 0) {
 +              gmx_fatal(FARGS,"Can not have dynamic box while using pull geometry '%s' (dim %c)",EPULLGEOM(ir->pull->eGeom),'x'+m);
 +            }
 +          }
 +        }
 +      }
 +      }
 +    }
 +  }
 +
 +  check_disre(sys);
 +}
 +
 +void double_check(t_inputrec *ir,matrix box,gmx_bool bConstr,warninp_t wi)
 +{
 +  real min_size;
 +  gmx_bool bTWIN;
 +  char warn_buf[STRLEN];
 +  const char *ptr;
 +  
 +  ptr = check_box(ir->ePBC,box);
 +  if (ptr) {
 +      warning_error(wi,ptr);
 +  }  
 +
 +  if (bConstr && ir->eConstrAlg == econtSHAKE) {
 +    if (ir->shake_tol <= 0.0) {
 +      sprintf(warn_buf,"ERROR: shake-tol must be > 0 instead of %g\n",
 +              ir->shake_tol);
 +      warning_error(wi,warn_buf);
 +    }
 +
 +    if (IR_TWINRANGE(*ir) && ir->nstlist > 1) {
 +      sprintf(warn_buf,"With twin-range cut-off's and SHAKE the virial and the pressure are incorrect.");
 +      if (ir->epc == epcNO) {
 +      warning(wi,warn_buf);
 +      } else {
 +          warning_error(wi,warn_buf);
 +      }
 +    }
 +  }
 +
 +  if( (ir->eConstrAlg == econtLINCS) && bConstr) {
 +    /* If we have Lincs constraints: */
 +    if(ir->eI==eiMD && ir->etc==etcNO &&
 +       ir->eConstrAlg==econtLINCS && ir->nLincsIter==1) {
 +      sprintf(warn_buf,"For energy conservation with LINCS, lincs_iter should be 2 or larger.\n");
 +      warning_note(wi,warn_buf);
 +    }
 +    
 +    if ((ir->eI == eiCG || ir->eI == eiLBFGS) && (ir->nProjOrder<8)) {
 +      sprintf(warn_buf,"For accurate %s with LINCS constraints, lincs-order should be 8 or more.",ei_names[ir->eI]);
 +      warning_note(wi,warn_buf);
 +    }
 +    if (ir->epc==epcMTTK) {
 +        warning_error(wi,"MTTK not compatible with lincs -- use shake instead.");
 +    }
 +  }
 +
 +  if (ir->LincsWarnAngle > 90.0) {
 +    sprintf(warn_buf,"lincs-warnangle can not be larger than 90 degrees, setting it to 90.\n");
 +    warning(wi,warn_buf);
 +    ir->LincsWarnAngle = 90.0;
 +  }
 +
 +  if (ir->ePBC != epbcNONE) {
 +    if (ir->nstlist == 0) {
 +      warning(wi,"With nstlist=0 atoms are only put into the box at step 0, therefore drifting atoms might cause the simulation to crash.");
 +    }
 +    bTWIN = (ir->rlistlong > ir->rlist);
 +    if (ir->ns_type == ensGRID) {
 +      if (sqr(ir->rlistlong) >= max_cutoff2(ir->ePBC,box)) {
 +          sprintf(warn_buf,"ERROR: The cut-off length is longer than half the shortest box vector or longer than the smallest box diagonal element. Increase the box size or decrease %s.\n",
 +              bTWIN ? (ir->rcoulomb==ir->rlistlong ? "rcoulomb" : "rvdw"):"rlist");
 +          warning_error(wi,warn_buf);
 +      }
 +    } else {
 +      min_size = min(box[XX][XX],min(box[YY][YY],box[ZZ][ZZ]));
 +      if (2*ir->rlistlong >= min_size) {
 +          sprintf(warn_buf,"ERROR: One of the box lengths is smaller than twice the cut-off length. Increase the box size or decrease rlist.");
 +          warning_error(wi,warn_buf);
 +      if (TRICLINIC(box))
 +        fprintf(stderr,"Grid search might allow larger cut-off's than simple search with triclinic boxes.");
 +      }
 +    }
 +  }
 +}
 +
 +void check_chargegroup_radii(const gmx_mtop_t *mtop,const t_inputrec *ir,
 +                             rvec *x,
 +                             warninp_t wi)
 +{
 +    real rvdw1,rvdw2,rcoul1,rcoul2;
 +    char warn_buf[STRLEN];
 +
 +    calc_chargegroup_radii(mtop,x,&rvdw1,&rvdw2,&rcoul1,&rcoul2);
 +
 +    if (rvdw1 > 0)
 +    {
 +        printf("Largest charge group radii for Van der Waals: %5.3f, %5.3f nm\n",
 +               rvdw1,rvdw2);
 +    }
 +    if (rcoul1 > 0)
 +    {
 +        printf("Largest charge group radii for Coulomb:       %5.3f, %5.3f nm\n",
 +               rcoul1,rcoul2);
 +    }
 +
 +    if (ir->rlist > 0)
 +    {
 +        if (rvdw1  + rvdw2  > ir->rlist ||
 +            rcoul1 + rcoul2 > ir->rlist)
 +        {
 +            sprintf(warn_buf,"The sum of the two largest charge group radii (%f) is larger than rlist (%f)\n",max(rvdw1+rvdw2,rcoul1+rcoul2),ir->rlist);
 +            warning(wi,warn_buf);
 +        }
 +        else
 +        {
 +            /* Here we do not use the zero at cut-off macro,
 +             * since user defined interactions might purposely
 +             * not be zero at the cut-off.
 +             */
 +            if (EVDW_IS_ZERO_AT_CUTOFF(ir->vdwtype) &&
 +                rvdw1 + rvdw2 > ir->rlist - ir->rvdw)
 +            {
 +                sprintf(warn_buf,"The sum of the two largest charge group radii (%f) is larger than rlist (%f) - rvdw (%f)\n",
 +                        rvdw1+rvdw2,
 +                        ir->rlist,ir->rvdw);
 +                if (ir_NVE(ir))
 +                {
 +                    warning(wi,warn_buf);
 +                }
 +                else
 +                {
 +                    warning_note(wi,warn_buf);
 +                }
 +            }
 +            if (EEL_IS_ZERO_AT_CUTOFF(ir->coulombtype) &&
 +                rcoul1 + rcoul2 > ir->rlistlong - ir->rcoulomb)
 +            {
 +                sprintf(warn_buf,"The sum of the two largest charge group radii (%f) is larger than %s (%f) - rcoulomb (%f)\n",
 +                        rcoul1+rcoul2,
 +                        ir->rlistlong > ir->rlist ? "rlistlong" : "rlist",
 +                        ir->rlistlong,ir->rcoulomb);
 +                if (ir_NVE(ir))
 +                {
 +                    warning(wi,warn_buf);
 +                }
 +                else
 +                {
 +                    warning_note(wi,warn_buf);
 +                }
 +            }
 +        }
 +    }
 +}
index 564d61128b09d99daa6df36b9b0b0eb33bb2618b,0000000000000000000000000000000000000000..c4f9c6e7fe96d198a8a215fcb9932a5a1757bc7d
mode 100644,000000..100644
--- /dev/null
@@@ -1,69 -1,0 +1,70 @@@
- void do_edsam(t_inputrec *ir,gmx_large_int_t step,t_mdatoms *md,
 + /*
 + * 
 + *                This source code is part of
 + * 
 + *                 G   R   O   M   A   C   S
 + * 
 + *          GROningen MAchine for Chemical Simulations
 + * 
 + *                        VERSION 3.2.0
 + * Written by David van der Spoel, Erik Lindahl, Berk Hess, and others.
 + * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 + * Copyright (c) 2001-2004, The GROMACS development team,
 + * check out http://www.gromacs.org for more information.
 +
 + * This program is free software; you can redistribute it and/or
 + * modify it under the terms of the GNU General Public License
 + * as published by the Free Software Foundation; either version 2
 + * of the License, or (at your option) any later version.
 + * 
 + * If you want to redistribute modifications, please consider that
 + * scientific software is very special. Version control is crucial -
 + * bugs must be traceable. We will be happy to consider code for
 + * inclusion in the official distribution, but derived work must not
 + * be called official GROMACS. Details are found in the README & COPYING
 + * files - if they are missing, get the official version at www.gromacs.org.
 + * 
 + * To help us fund GROMACS development, we humbly ask that you cite
 + * the papers on the package - you can find them in the top README file.
 + * 
 + * For more info, check our website at http://www.gromacs.org
 + * 
 + * And Hey:
 + * Gromacs Runs On Most of All Computer Systems
 + */
 +
 +#ifndef _edsam_h
 +#define _edsam_h
 +
 +#include "typedefs.h"
 +
 +#ifdef __cplusplus
 +extern "C" {
 +#endif
 +
- gmx_edsam_t ed_open(int nfile,const t_filenm fnm[],unsigned long Flags,t_commrec *cr);
- /* Sets the ED input/output filenames, opens output (.edo) file */
++void do_edsam(t_inputrec *ir,gmx_large_int_t step,
 +                     t_commrec *cr,rvec xs[],rvec v[],matrix box,gmx_edsam_t ed);
 +/* Essential dynamics constraints, called from constrain() */
 +
-                        gmx_edsam_t ed, rvec x[], matrix box);
++gmx_edsam_t ed_open(int natoms, edsamstate_t *EDstate, int nfile,const t_filenm fnm[],
++        unsigned long Flags, const output_env_t oenv, t_commrec *cr);
++/* Sets the ED input/output filenames, opens output file */
 +
 +void init_edsam(gmx_mtop_t *mtop,t_inputrec *ir,t_commrec *cr,
- void do_flood(FILE *log, t_commrec *cr, rvec x[],rvec force[], gmx_edsam_t ed,
++                       gmx_edsam_t ed, rvec x[], matrix box, edsamstate_t *edsamstate);
 +/* Init routine for ED and flooding. Calls init_edi in a loop for every .edi-cycle 
 + * contained in the input file, creates a NULL terminated list of t_edpar structures */
 +
 +void dd_make_local_ed_indices(gmx_domdec_t *dd, gmx_edsam_t ed);
 +/* Make a selection of the home atoms for the ED groups. 
 + * Should be called at every domain decomposition. */
 + 
++void do_flood(t_commrec *cr, t_inputrec *ir, rvec x[],rvec force[], gmx_edsam_t ed,
 +        matrix box, gmx_large_int_t step, gmx_bool bNS);
 +/* Flooding - called from do_force() */
 +
 +#ifdef __cplusplus
 +}
 +#endif
 +
 +#endif        /* _edsam_h */
index d8fc34f1988bf1bffcf0ff0267e83daf155a2215,0000000000000000000000000000000000000000..c379843876cbdd1003d3e88eed88698fea05834f
mode 100644,000000..100644
--- /dev/null
@@@ -1,314 -1,0 +1,311 @@@
- int 
- gmx_dih(int argc,char *argv[]);
 +/*
 + * 
 + *                This source code is part of
 + * 
 + *                 G   R   O   M   A   C   S
 + * 
 + *          GROningen MAchine for Chemical Simulations
 + * 
 + * Written by David van der Spoel, Erik Lindahl, Berk Hess, and others.
 + * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 + * Copyright (c) 2001-2008, The GROMACS development team,
 + * check out http://www.gromacs.org for more information.
 + 
 + * This program is free software; you can redistribute it and/or
 + * modify it under the terms of the GNU General Public License
 + * as published by the Free Software Foundation; either version 2
 + * of the License, or (at your option) any later version.
 + * 
 + * If you want to redistribute modifications, please consider that
 + * scientific software is very special. Version control is crucial -
 + * bugs must be traceable. We will be happy to consider code for
 + * inclusion in the official distribution, but derived work must not
 + * be called official GROMACS. Details are found in the README & COPYING
 + * files - if they are missing, get the official version at www.gromacs.org.
 + * 
 + * To help us fund GROMACS development, we humbly ask that you cite
 + * the papers on the package - you can find them in the top README file.
 + * 
 + * For more info, check our website at http://www.gromacs.org
 + * 
 + * And Hey:
 + * Gallium Rubidium Oxygen Manganese Argon Carbon Silicon
 + */
 +
 +#ifndef _gmx_ana_h
 +#define _gmx_ana_h
 +
 +#ifdef __cplusplus
 +extern "C" { 
 +#endif
 +
 +int 
 +gmx_anadock(int argc,char *argv[]);
 +
 +int 
 +gmx_analyze(int argc,char *argv[]);
 +
 +int 
 +gmx_anaeig(int argc,char *argv[]);
 +
 +int 
 +gmx_g_angle(int argc,char *argv[]);
 +
 +int 
 +gmx_bar(int argc,char *argv[]);
 +
 +int 
 +gmx_bond(int argc,char *argv[]);
 +
 +int 
 +gmx_bundle(int argc,char *argv[]);
 +
 +int 
 +gmx_chi(int argc,char *argv[]);
 +
 +int 
 +gmx_cluster(int argc,char *argv[]);
 +
 +int 
 +gmx_confrms(int argc,char *argv[]);
 +
 +int 
 +gmx_covar(int argc,char *argv[]);
 +
 +int 
 +gmx_current(int argc,char *argv[]);
 +
 +int 
 +gmx_density(int argc,char *argv[]);
 +
 +int 
 +gmx_densmap(int argc,char *argv[]);
 +
 +int 
 +gmx_densorder(int argc,char *argv[]);
 +
 +int 
 +gmx_dielectric(int argc,char *argv[]);
 +
 +int 
 +gmx_dipoles(int argc,char *argv[]);
 +
 +int 
 +gmx_disre(int argc,char *argv[]);
 +
 +int 
 +gmx_dist(int argc,char *argv[]);
 +
 +int 
 +gmx_do_dssp(int argc,char *argv[]);
 +
 +int 
 +gmx_dos(int argc,char *argv[]);
 +
 +int 
 +gmx_dyecoupl(int argc,char *argv[]);
 +
 +int 
 +gmx_dyndom(int argc,char *argv[]);
 +
 +int 
 +gmx_editconf(int argc, char *argv[]);
 +
 +int 
 +gmx_eneconv(int argc,char *argv[]);
 +
 +int 
 +gmx_enemat(int argc,char *argv[]);
 +
 +int 
 +gmx_energy(int argc,char *argv[]);
 +
 +int 
 +gmx_lie(int argc,char *argv[]);
 +
 +int 
 +gmx_filter(int argc,char *argv[]);
 +
 +int 
 +gmx_genbox(int argc,char *argv[]);
 +
 +int 
 +gmx_genconf(int argc,char *argv[]);
 +
 +int 
 +gmx_genion(int argc,char *argv[]);
 +
 +int 
 +gmx_genpr(int argc,char *argv[]);
 +
 +int 
 +gmx_gyrate(int argc,char *argv[]);
 +
 +int 
 +gmx_h2order(int argc,char *argv[]);
 +
 +int 
 +gmx_hbond(int argc,char *argv[]);
 +
 +int 
 +gmx_helix(int argc,char *argv[]);
 +
 +int
 +gmx_helixorient(int argc,char *argv[]);
 +
 +int
 +gmx_hydorder(int argc,char *argv[]);
 +
 +int 
 +gmx_kinetics(int argc,char *argv[]);
 +
 +int 
 +gmx_make_edi(int argc,char *argv[]);
 +
 +int 
 +gmx_make_ndx(int argc,char *argv[]);
 +
 +int 
 +gmx_mindist(int argc,char *argv[]);
 +
 +int 
 +gmx_mk_angndx(int argc,char *argv[]);
 +
 +int 
 +gmx_msd(int argc,char *argv[]);
 +
 +int 
 +gmx_morph(int argc,char *argv[]);
 +
 +int 
 +gmx_nmeig(int argc,char *argv[]);
 +
 +int 
 +gmx_nmens(int argc,char *argv[]);
 +
 +int 
 +gmx_nmtraj(int argc,char *argv[]);
 +
 +int 
 +gmx_order(int argc,char *argv[]);
 +
 +int 
 +gmx_polystat(int argc,char *argv[]);
 +
 +int 
 +gmx_potential(int argc,char *argv[]);
 +
 +int
 +gmx_principal(int argc,char *argv[]);
 +
 +int 
 +gmx_rama(int argc,char *argv[]);
 +
 +int 
 +gmx_rdf(int argc,char *argv[]);
 +
 +int 
 +gmx_rotmat(int argc,char *argv[]);
 +
 +int 
 +gmx_rms(int argc,char *argv[]);
 +
 +int 
 +gmx_rmsdist(int argc,char *argv[]);
 +
 +int 
 +gmx_rmsf(int argc,char *argv[]);
 +
 +int 
 +gmx_rotacf(int argc,char *argv[]);
 +
 +int 
 +gmx_saltbr(int argc,char *argv[]);
 +
 +int 
 +gmx_sas(int argc,char *argv[]);
 +
 +int 
 +gmx_sdf(int argc,char *argv[]);
 +
 +int 
 +gmx_select(int argc,char *argv[]);
 +
 +int 
 +gmx_sgangle(int argc,char *argv[]);
 +
 +int 
 +gmx_sham(int argc,char *argv[]);
 +
 +int 
 +gmx_sigeps(int argc,char *argv[]);
 +
 +int 
 +gmx_sorient(int argc,char *argv[]);
 +
 +int 
 +gmx_spol(int argc,char *argv[]);
 +
 +int 
 +gmx_spatial(int argc,char *argv[]);
 +
 +int 
 +gmx_tcaf(int argc,char *argv[]);
 +
 +int 
 +gmx_traj(int argc,char *argv[]);
 +
 +int
 +gmx_trjcat(int argc,char *argv[]);
 +
 +int 
 +gmx_trjconv(int argc,char *argv[]);
 +
 +int 
 +gmx_trjorder(int argc,char *argv[]);
 +
 +int 
 +gmx_tune_pme(int argc,char *argv[]);
 +
 +int 
 +gmx_velacc(int argc,char *argv[]);
 +
 +int 
 +gmx_clustsize(int argc,char *argv[]);
 +
 +int 
 +gmx_mdmat(int argc,char *argv[]);
 +
 +int 
 +gmx_vanhove(int argc,char *argv[]);
 +
 +int 
 +gmx_wham(int argc,char *argv[]);
 +
 +int 
 +gmx_wheel(int argc,char *argv[]);
 +
 +int 
 +gmx_xpm2ps(int argc,char *argv[]);
 +
 +int 
 +gmx_membed(int argc,char *argv[]);
 +
 +int 
 +gmx_pme_error(int argc,char *argv[]);
 +
 +int
 +gmx_options(int argc,char *argv[]);
 +
 +int
 +gmx_sans(int argc,char *argv[]);
 +
 +int
 +gmx_saxs(int argc,char *argv[]);
 +
 +#ifdef __cplusplus
 +}
 +#endif
 +
 +#endif
 +/* _gmx_ana_h */
index 25c42e1db2c7a720a635e747fd515384110e5192,0000000000000000000000000000000000000000..6fe60f824780b5edbda95cd865103d0b1fc24dcc
mode 100644,000000..100644
--- /dev/null
@@@ -1,259 -1,0 +1,295 @@@
 +/* -*- mode: c; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4; c-file-style: "stroustrup"; -*-
 + *
 + * 
 + * This file is part of GROMACS.
 + * Copyright (c) 2012-  
 + *
 + * Written by the Gromacs development team under coordination of
 + * David van der Spoel, Berk Hess, and Erik Lindahl.
 + *
 + * This library is free software; you can redistribute it and/or
 + * modify it under the terms of the GNU Lesser General Public License
 + * as published by the Free Software Foundation; either version 2
 + * of the License, or (at your option) any later version.
 + *
 + * To help us fund GROMACS development, we humbly ask that you cite
 + * the research papers on the package. Check out http://www.gromacs.org
 + *
 + * And Hey:
 + * Gnomes, ROck Monsters And Chili Sauce
 + */
 +#ifndef GMX_CPUID_H_
 +#define GMX_CPUID_H_
 +
 +#include <stdio.h>
 +
++
 +#ifdef __cplusplus
 +extern "C" {
 +#endif
 +#if 0
 +} /* fixes auto-indentation problems */
 +#endif
 +
 +
 +/* Currently identifiable CPU Vendors */
 +enum gmx_cpuid_vendor
 +{
 +    GMX_CPUID_VENDOR_CANNOTDETECT,   /* Should only be used if something fails */
 +    GMX_CPUID_VENDOR_UNKNOWN,
 +    GMX_CPUID_VENDOR_INTEL,
 +    GMX_CPUID_VENDOR_AMD,
 +    GMX_CPUID_NVENDORS
 +};
 +
 +
 +/* CPU feature/property list, to be used as indices into the feature array of the
 + * gmxcpuid_t data structure.
 + *
 + * To facilitate looking things up, we keep this list alphabetical.
 + * The list is NOT exhaustive - we have basically added stuff that might be
 + * useful in an application like Gromacs.
 + *
 + * AMD and Intel tend to share most architectural elements, and even if the
 + * flags might have to be detected in different ways (different cpuid registers),
 + * once the flag is present the functions should be identical. Unfortunately the
 + * trend right now (2012) seems to be that they are diverging. This means that
 + * we need to use specific flags to the compiler to maximize performance, and
 + * then the binaries might not be portable between Intel and AMD as they were
 + * before when we only needed to check for SSE and/or SSE2 support in Gromacs.
 + */
 +enum gmx_cpuid_feature
 +{
 +    GMX_CPUID_FEATURE_CANNOTDETECT,      /* Flag set if we could not detect on this CPU  */
 +    GMX_CPUID_FEATURE_X86_AES,           /* x86 advanced encryption standard accel.      */
 +    GMX_CPUID_FEATURE_X86_APIC,          /* APIC support                                 */
 +    GMX_CPUID_FEATURE_X86_AVX,           /* Advanced vector extensions                   */
 +    GMX_CPUID_FEATURE_X86_AVX2,          /* AVX2 including gather support (not used yet) */
 +    GMX_CPUID_FEATURE_X86_CLFSH,         /* Supports CLFLUSH instruction                 */
 +    GMX_CPUID_FEATURE_X86_CMOV,          /* Conditional move insn support                */
 +    GMX_CPUID_FEATURE_X86_CX8,           /* Supports CMPXCHG8B (8-byte compare-exchange) */
 +    GMX_CPUID_FEATURE_X86_CX16,          /* Supports CMPXCHG16B (16-byte compare-exchg)  */
 +    GMX_CPUID_FEATURE_X86_F16C,          /* Supports 16-bit FP conversion instructions   */
 +    GMX_CPUID_FEATURE_X86_FMA,           /* Fused-multiply add support (mainly for AVX)  */
 +    GMX_CPUID_FEATURE_X86_FMA4,          /* 4-operand FMA, only on AMD for now           */
 +    GMX_CPUID_FEATURE_X86_HTT,           /* Hyper-Threading supported                    */
 +    GMX_CPUID_FEATURE_X86_LAHF_LM,       /* LAHF/SAHF support in 64 bits                 */
 +    GMX_CPUID_FEATURE_X86_MISALIGNSSE,   /* Support for misaligned SSE data instructions */
 +    GMX_CPUID_FEATURE_X86_MMX,           /* MMX registers and instructions               */
 +    GMX_CPUID_FEATURE_X86_MSR,           /* Supports Intel model-specific-registers      */
 +    GMX_CPUID_FEATURE_X86_NONSTOP_TSC,   /* Invariant TSC (constant rate in ACPI states) */
 +    GMX_CPUID_FEATURE_X86_PCID,          /* Process context identifier support           */
 +    GMX_CPUID_FEATURE_X86_PCLMULDQ,      /* Carry-less 64-bit multiplication supported   */
 +    GMX_CPUID_FEATURE_X86_PDCM,          /* Perfmon and Debug Capability                 */
 +    GMX_CPUID_FEATURE_X86_PDPE1GB,       /* Support for 1GB pages                        */
 +    GMX_CPUID_FEATURE_X86_POPCNT,        /* Supports the POPCNT (population count) insn  */
 +    GMX_CPUID_FEATURE_X86_PSE,           /* Supports 4MB-pages (page size extension)     */
 +    GMX_CPUID_FEATURE_X86_RDRND,         /* RDRAND high-quality hardware random numbers  */
 +    GMX_CPUID_FEATURE_X86_RDTSCP,        /* Serializing rdtscp instruction available     */
 +    GMX_CPUID_FEATURE_X86_SSE2,          /* SSE 2                                        */
 +    GMX_CPUID_FEATURE_X86_SSE3,          /* SSE 3                                        */
 +    GMX_CPUID_FEATURE_X86_SSE4A,         /* SSE 4A                                       */
 +    GMX_CPUID_FEATURE_X86_SSE4_1,        /* SSE 4.1                                      */
 +    GMX_CPUID_FEATURE_X86_SSE4_2,        /* SSE 4.2                                      */
 +    GMX_CPUID_FEATURE_X86_SSSE3,         /* Supplemental SSE3                            */
 +    GMX_CPUID_FEATURE_X86_TDT,           /* TSC deadline timer                           */
 +    GMX_CPUID_FEATURE_X86_X2APIC,        /* Extended xAPIC Support                       */
 +    GMX_CPUID_FEATURE_X86_XOP,           /* AMD extended instructions, only AMD for now  */
 +    GMX_CPUID_NFEATURES
 +};
 +
 +
 +/* Currently supported acceleration instruction sets, intrinsics or other similar combinations
 + * in Gromacs. There is not always a 1-to-1 correspondence with feature flags; on some AMD
 + * hardware we prefer to use 128bit AVX instructions (although 256-bit ones could be executed),
 + * and we still haven't written the AVX2 kernels.
 + */
 +enum gmx_cpuid_acceleration
 +{
 +    GMX_CPUID_ACCELERATION_CANNOTDETECT,    /* Should only be used if something fails */
 +    GMX_CPUID_ACCELERATION_NONE,
 +    GMX_CPUID_ACCELERATION_X86_SSE2,
 +    GMX_CPUID_ACCELERATION_X86_SSE4_1,
 +    GMX_CPUID_ACCELERATION_X86_AVX_128_FMA,
 +    GMX_CPUID_ACCELERATION_X86_AVX_256,
 +    GMX_CPUID_NACCELERATIONS
 +};
 +
 +/* Text strings corresponding to CPU vendors */
 +extern const char *
 +gmx_cpuid_vendor_string[GMX_CPUID_NVENDORS];
 +
 +/* Text strings for CPU feature indices */
 +extern const char *
 +gmx_cpuid_feature_string[GMX_CPUID_NFEATURES];
 +
 +/* Text strings for Gromacs acceleration/instruction sets */
 +extern const char *
 +gmx_cpuid_acceleration_string[GMX_CPUID_NACCELERATIONS];
 +
 +
 +/* Abstract data type with CPU detection information. Set by gmx_cpuid_init(). */
 +typedef struct gmx_cpuid *
 +gmx_cpuid_t;
 +
 +
 +/* Fill the data structure by using CPU detection instructions.
 + * Return 0 on success, 1 if something bad happened.
 + */
 +int
 +gmx_cpuid_init              (gmx_cpuid_t *              cpuid);
 +
 +
 +/* Return the vendor id as enumerated type. Use gmx_cpuid_vendor_string[]
 + * to get the corresponding text string.
 + */
 +enum gmx_cpuid_vendor
 +gmx_cpuid_vendor            (gmx_cpuid_t                cpuid);
 +
 +
 +/* Return a constant pointer to the processor brand string. */
 +const char *
 +gmx_cpuid_brand             (gmx_cpuid_t                cpuid);
 +
 +
 +/* Return processor family version. For a chip of version 1.2.3, this is 1 */
 +int
 +gmx_cpuid_family            (gmx_cpuid_t                cpuid);
 +
 +/* Return processor model version, For a chip of version 1.2.3, this is 2. */
 +int
 +gmx_cpuid_model             (gmx_cpuid_t                cpuid);
 +
 +/* Return processor stepping version, For a chip of version 1.2.3, this is 3. */
 +int
 +gmx_cpuid_stepping          (gmx_cpuid_t                cpuid);
 +
 +
 +/* Check whether a particular CPUID feature is set.
 + * Returns 0 if flag "feature" is not set, 1 if the flag is set. We cannot use
 + * gmx_bool here since this file must be possible to compile without simple.h.
 + */
 +int
 +gmx_cpuid_feature           (gmx_cpuid_t                cpuid,
 +                             enum gmx_cpuid_feature     feature);
 +
 +
++/* Return pointers to cpu topology information.
++ * 
++ * Important: CPU topology requires more OS support than most other
++ * functions in this file, including support for thread pinning to hardware.
++ * This means it will not work on some platforms, including e.g. Mac OS X.
++ * Thus, it is IMPERATIVE that you check the return value from this routine
++ * before doing anything with the information. It is only if the return
++ * value is zero that the data is valid.
++ *
++ * For the returned values we have:
++ * - nprocessors         Total number of logical processors reported by OS
++ * - npackages           Usually number of CPU sockets
++ * - ncores_per_package  Number of cores in each package
++ * - nhwthreads_per_core Number of hardware threads per core; 2 for hyperthreading.
++ * - package_id          Array with the package index for each logical cpu
++ * - core_id             Array with local core index for each logical cpu
++ * - hwthread_id         Array with local hwthread index for each logical cpu
++ * - locality_order      Array with logical cpu numbers, sorted in order
++ *                       of physical and logical locality in the system.
++ *
++ * All arrays are of length nprocessors.
++ */
++int
++gmx_cpuid_topology(gmx_cpuid_t        cpuid,
++                   int *              nprocessors,
++                   int *              npackages,
++                   int *              ncores_per_package,
++                   int *              nhwthreads_per_core,
++                   const int **       package_id,
++                   const int **       core_id,
++                   const int **       hwthread_id,
++                   const int **       locality_order);
++
 +/* Enumerated values for x86 SMT enabled-status. Note that this does not refer
 + * to Hyper-Threading support (that is the flag GMX_CPUID_FEATURE_X86_HTT), but
 + * whether Hyper-Threading is _enabled_ and _used_ in bios right now.
 + */
 +enum gmx_cpuid_x86_smt
 +{
 +    GMX_CPUID_X86_SMT_CANNOTDETECT,
 +    GMX_CPUID_X86_SMT_DISABLED,
 +    GMX_CPUID_X86_SMT_ENABLED
 +};
 +
 +/* Returns the status of x86 SMT support. IMPORTANT: There are non-zero
 + * return values for this routine that still do not indicate supported and
 + * enabled smt/Hyper-Threading. You need to carefully check the return value
 + * against the enumerated type values to see what you are getting.
 + *
 + * Long-term, this functionality will move to a new hardware topology detection
 + * layer, but that will require a lot of new code and a working interface to the
 + * hwloc library. Surprisingly, there is no simple way to find out that
 + * Hyper-Threading is actually turned on without fully enumerating and checking
 + * all the cores, which we presently can only do on Linux. This means a couple
 + * of things:
 + *
 + * 1) If you want to know whether your CPU _supports_ Hyper-Threading in the
 + *    first place, check the GMX_CPUID_FEATURE_X86_HTT flag instead!
 + * 2) There are several scenarios where this routine will say that it cannot
 + *    detect whether SMT is enabled and used right now.
 + * 3) If you need support on non-Linux x86, you have to write it :-)
 + * 4) Don't invest too much efforts, since this will be replaced with
 + *    full hardware topology detection in the future.
 + * 5) Don't worry if the detection does not work. It is not a catastrophe, but
 + *    but we get slightly better performance on x86 if we use Hyper-Threading
 + *    cores in direct space, but not reciprocal space.
 + *
 + * Since this routine presently only supports Hyper-Threading we say X86_SMT
 + * in order not to give the impression we can detect any SMT. We haven't
 + * even tested the performance on other SMT implementations, so it is not
 + * obvious we shouldn't use SMT there.
++ *
++ * Note that you can get more complete topology information from
++ * gmx_cpuid_topology(), although that requires slightly more OS support.
 + */
 +enum gmx_cpuid_x86_smt
 +gmx_cpuid_x86_smt(gmx_cpuid_t cpuid);
 +
 +
 +/* Formats a text string (up to n characters) from the data structure.
 + * The output will have max 80 chars between newline characters.
 + */
 +int
 +gmx_cpuid_formatstring      (gmx_cpuid_t                cpuid,
 +                             char *                     s,
 +                             int                        n);
 +
 +
 +/* Suggests a suitable gromacs acceleration based on the support in the
 + * hardware.
 + */
 +enum gmx_cpuid_acceleration
 +gmx_cpuid_acceleration_suggest  (gmx_cpuid_t                    cpuid);
 +
 +
 +/* Check if this binary was compiled with the same acceleration as we
 + * would suggest for the current hardware. Always print stats to the log file
 + * if it is non-NULL, and print a warning in stdout if we don't have a match.
 + */
 +int
 +gmx_cpuid_acceleration_check    (gmx_cpuid_t                cpuid,
 +                                 FILE *                     log);
 +
 +
 +/* Release resources used by data structure. Note that the pointer to the
 + * CPU brand string will no longer be valid once this routine has been called.
 + */
 +void
 +gmx_cpuid_done              (gmx_cpuid_t                cpuid);
 +
 +
 +
 +
 +#ifdef __cplusplus
 +}
 +#endif
 +
 +
 +#endif /* GMX_CPUID_H_ */
index b47f577be08ea8cdcfc4b85cac49a84aed430e60,0000000000000000000000000000000000000000..02390c1fcc2c942705e6228d2a76561195151b97
mode 100644,000000..100644
--- /dev/null
@@@ -1,1328 -1,0 +1,1336 @@@
 +/* -*- mode: c; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4; c-file-style: "stroustrup"; -*-
 + *
 + * 
 + * This file is part of GROMACS.
 + * Copyright (c) 2012-  
 + *
 + * Written by the Gromacs development team under coordination of
 + * David van der Spoel, Berk Hess, and Erik Lindahl.
 + *
 + * This library is free software; you can redistribute it and/or
 + * modify it under the terms of the GNU Lesser General Public License
 + * as published by the Free Software Foundation; either version 2
 + * of the License, or (at your option) any later version.
 + *
 + * To help us fund GROMACS development, we humbly ask that you cite
 + * the research papers on the package. Check out http://www.gromacs.org
 + * 
 + * And Hey:
 + * Gnomes, ROck Monsters And Chili Sauce
 + */
 +#ifndef _gmx_math_x86_avx_128_fma_double_h_
 +#define _gmx_math_x86_avx_128_fma_double_h_
 +
++#include <immintrin.h> /* AVX */
++#ifdef HAVE_X86INTRIN_H
++#include <x86intrin.h> /* FMA */
++#endif
++#ifdef HAVE_INTRIN_H
++#include <intrin.h> /* FMA MSVC */
++#endif
++
 +#include <math.h>
 +
 +#include "gmx_x86_avx_128_fma.h"
 +
 +
 +#ifndef M_PI
 +#  define M_PI 3.14159265358979323846264338327950288
 +#endif
 +
 +
 +/************************
 + *                      *
 + * Simple math routines *
 + *                      *
 + ************************/
 +
 +/* 1.0/sqrt(x) */
 +static gmx_inline __m128d
 +gmx_mm_invsqrt_pd(__m128d x)
 +{
 +    const __m128d half  = _mm_set1_pd(0.5);
 +    const __m128d three = _mm_set1_pd(3.0);
 +
 +    /* Lookup instruction only exists in single precision, convert back and forth... */
 +    __m128d lu = _mm_cvtps_pd(_mm_rsqrt_ps( _mm_cvtpd_ps(x)));
 +
 +    lu = _mm_mul_pd(_mm_mul_pd(half,lu),_mm_nmacc_pd(_mm_mul_pd(lu,lu),x,three));
 +    return _mm_mul_pd(_mm_mul_pd(half,lu),_mm_nmacc_pd(_mm_mul_pd(lu,lu),x,three));
 +}
 +
 +/* 1.0/sqrt(x), done for a pair of arguments to improve throughput */
 +static void
 +gmx_mm_invsqrt_pair_pd(__m128d x1, __m128d x2, __m128d *invsqrt1, __m128d *invsqrt2)
 +{
 +    const __m128d half   = _mm_set1_pd(0.5);
 +    const __m128d three  = _mm_set1_pd(3.0);
 +    const __m128  halff  = _mm_set1_ps(0.5f);
 +    const __m128  threef = _mm_set1_ps(3.0f);
 +    
 +    __m128 xf,luf;
 +    __m128d lu1,lu2;
 +    
 +    /* Do first N-R step in float for 2x throughput */
 +    xf  = _mm_shuffle_ps(_mm_cvtpd_ps(x1),_mm_cvtpd_ps(x2),_MM_SHUFFLE(1,0,1,0));
 +    luf = _mm_rsqrt_ps(xf);
 +    
 +    luf = _mm_mul_ps(_mm_mul_ps(halff,luf),_mm_nmacc_ps(_mm_mul_ps(luf,luf),xf,threef));
 +
 +    
 +    lu2 = _mm_cvtps_pd(_mm_shuffle_ps(luf,luf,_MM_SHUFFLE(3,2,3,2)));
 +    lu1 = _mm_cvtps_pd(luf);
 +    
 +    *invsqrt1 = _mm_mul_pd(_mm_mul_pd(half,lu1),_mm_nmacc_pd(_mm_mul_pd(lu1,lu1),x1,three));
 +    *invsqrt2 = _mm_mul_pd(_mm_mul_pd(half,lu2),_mm_nmacc_pd(_mm_mul_pd(lu2,lu2),x2,three));
 +}
 +
 +/* sqrt(x) - Do NOT use this (but rather invsqrt) if you actually need 1.0/sqrt(x) */
 +static gmx_inline __m128d
 +gmx_mm_sqrt_pd(__m128d x)
 +{
 +    __m128d mask;
 +    __m128d res;
 +
 +    mask = _mm_cmpeq_pd(x,_mm_setzero_pd());
 +    res  = _mm_andnot_pd(mask,gmx_mm_invsqrt_pd(x));
 +
 +    res  = _mm_mul_pd(x,res);
 +
 +    return res;
 +}
 +
 +/* 1.0/x */
 +static gmx_inline __m128d
 +gmx_mm_inv_pd(__m128d x)
 +{
 +    const __m128d two  = _mm_set1_pd(2.0);
 +
 +    /* Lookup instruction only exists in single precision, convert back and forth... */
 +    __m128d lu = _mm_cvtps_pd(_mm_rcp_ps( _mm_cvtpd_ps(x)));
 +
 +    /* Perform two N-R steps for double precision */
 +    lu         = _mm_mul_pd(lu,_mm_nmacc_pd(lu,x,two));
 +    return _mm_mul_pd(lu,_mm_nmacc_pd(lu,x,two));
 +}
 +
 +static gmx_inline __m128d
 +gmx_mm_abs_pd(__m128d x)
 +{
 +    const __m128d signmask  = gmx_mm_castsi128_pd( _mm_set_epi32(0x7FFFFFFF, 0xFFFFFFFF, 0x7FFFFFFF, 0xFFFFFFFF) );
 +
 +    return _mm_and_pd(x,signmask);
 +}
 +
 +
 +/*
 + * 2^x function.
 + *
 + * The 2^w term is calculated from a (6,0)-th order (no denominator) Minimax polynomia on the interval
 + * [-0.5,0.5].
 + *
 + * The approximation on [-0.5,0.5] is a rational Padé approximation, 1+2*P(x^2)/(Q(x^2)-P(x^2)),
 + * according to the same algorithm as used in the Cephes/netlib math routines.
 + */
 +static __m128d
 +gmx_mm_exp2_pd(__m128d x)
 +{
 +    /* Lower bound: We do not allow numbers that would lead to an IEEE fp representation exponent smaller than -126. */
 +    const __m128d arglimit = _mm_set1_pd(1022.0);
 +    const __m128i expbase  = _mm_set1_epi32(1023);
 +
 +    const __m128d P2       = _mm_set1_pd(2.30933477057345225087e-2);
 +    const __m128d P1       = _mm_set1_pd(2.02020656693165307700e1);
 +    const __m128d P0       = _mm_set1_pd(1.51390680115615096133e3);
 +    /* Q2 == 1.0 */
 +    const __m128d Q1       = _mm_set1_pd(2.33184211722314911771e2);
 +    const __m128d Q0       = _mm_set1_pd(4.36821166879210612817e3);
 +    const __m128d one      = _mm_set1_pd(1.0);
 +    const __m128d two      = _mm_set1_pd(2.0);
 +
 +    __m128d valuemask;
 +    __m128i iexppart;
 +    __m128d fexppart;
 +    __m128d intpart;
 +    __m128d z,z2;
 +    __m128d PolyP,PolyQ;
 +
 +    iexppart  = _mm_cvtpd_epi32(x);
 +    intpart   = _mm_round_pd(x,_MM_FROUND_TO_NEAREST_INT);
 +
 +    /* The two lowest elements of iexppart now contains 32-bit numbers with a correctly biased exponent.
 +     * To be able to shift it into the exponent for a double precision number we first need to
 +     * shuffle so that the lower half contains the first element, and the upper half the second.
 +     * This should really be done as a zero-extension, but since the next instructions will shift
 +     * the registers left by 52 bits it doesn't matter what we put there - it will be shifted out.
 +     * (thus we just use element 2 from iexppart).
 +     */
 +    iexppart  = _mm_shuffle_epi32(iexppart,_MM_SHUFFLE(2,1,2,0));
 +
 +    /* Do the shift operation on the 64-bit registers */
 +    iexppart  = _mm_add_epi32(iexppart,expbase);
 +    iexppart  = _mm_slli_epi64(iexppart,52);
 +
 +    valuemask = _mm_cmpge_pd(arglimit,gmx_mm_abs_pd(x));
 +    fexppart  = _mm_and_pd(valuemask,gmx_mm_castsi128_pd(iexppart));
 +
 +    z         = _mm_sub_pd(x,intpart);
 +    z2        = _mm_mul_pd(z,z);
 +
 +    PolyP     = _mm_macc_pd(P2,z2,P1);
 +    PolyQ     = _mm_add_pd(z2,Q1);
 +    PolyP     = _mm_macc_pd(PolyP,z2,P0);
 +    PolyQ     = _mm_macc_pd(PolyQ,z2,Q0);
 +    PolyP     = _mm_mul_pd(PolyP,z);
 +
 +    z         = _mm_mul_pd(PolyP,gmx_mm_inv_pd(_mm_sub_pd(PolyQ,PolyP)));
 +    z         = _mm_macc_pd(two,z,one);
 +
 +    z         = _mm_mul_pd(z,fexppart);
 +
 +    return  z;
 +}
 +
 +/* Exponential function. This could be calculated from 2^x as Exp(x)=2^(y), where y=log2(e)*x,
 + * but there will then be a small rounding error since we lose some precision due to the
 + * multiplication. This will then be magnified a lot by the exponential.
 + *
 + * Instead, we calculate the fractional part directly as a Padé approximation of
 + * Exp(z) on [-0.5,0.5]. We use extended precision arithmetics to calculate the fraction
 + * remaining after 2^y, which avoids the precision-loss.
 + */
 +static __m128d
 +gmx_mm_exp_pd(__m128d exparg)
 +{
 +    const __m128d argscale = _mm_set1_pd(1.4426950408889634073599);
 +    /* Lower bound: We do not allow numbers that would lead to an IEEE fp representation exponent smaller than -126. */
 +    const __m128d arglimit = _mm_set1_pd(1022.0);
 +    const __m128i expbase  = _mm_set1_epi32(1023);
 +
 +    const __m128d invargscale0  = _mm_set1_pd(6.93145751953125e-1);
 +    const __m128d invargscale1  = _mm_set1_pd(1.42860682030941723212e-6);
 +
 +    const __m128d P2       = _mm_set1_pd(1.26177193074810590878e-4);
 +    const __m128d P1       = _mm_set1_pd(3.02994407707441961300e-2);
 +    /* P0 == 1.0 */
 +    const __m128d Q3       = _mm_set1_pd(3.00198505138664455042E-6);
 +    const __m128d Q2       = _mm_set1_pd(2.52448340349684104192E-3);
 +    const __m128d Q1       = _mm_set1_pd(2.27265548208155028766E-1);
 +    /* Q0 == 2.0 */
 +    const __m128d one      = _mm_set1_pd(1.0);
 +    const __m128d two      = _mm_set1_pd(2.0);
 +
 +    __m128d valuemask;
 +    __m128i iexppart;
 +    __m128d fexppart;
 +    __m128d intpart;
 +    __m128d x,z,z2;
 +    __m128d PolyP,PolyQ;
 +
 +    x             = _mm_mul_pd(exparg,argscale);
 +
 +    iexppart  = _mm_cvtpd_epi32(x);
 +    intpart   = _mm_round_pd(x,_MM_FROUND_TO_NEAREST_INT);
 +
 +    /* The two lowest elements of iexppart now contains 32-bit numbers with a correctly biased exponent.
 +     * To be able to shift it into the exponent for a double precision number we first need to
 +     * shuffle so that the lower half contains the first element, and the upper half the second.
 +     * This should really be done as a zero-extension, but since the next instructions will shift
 +     * the registers left by 52 bits it doesn't matter what we put there - it will be shifted out.
 +     * (thus we just use element 2 from iexppart).
 +     */
 +    iexppart  = _mm_shuffle_epi32(iexppart,_MM_SHUFFLE(2,1,2,0));
 +
 +    /* Do the shift operation on the 64-bit registers */
 +    iexppart  = _mm_add_epi32(iexppart,expbase);
 +    iexppart  = _mm_slli_epi64(iexppart,52);
 +
 +    valuemask = _mm_cmpge_pd(arglimit,gmx_mm_abs_pd(x));
 +    fexppart  = _mm_and_pd(valuemask,gmx_mm_castsi128_pd(iexppart));
 +
 +    z         = _mm_sub_pd(exparg,_mm_mul_pd(invargscale0,intpart));
 +    z         = _mm_sub_pd(z,_mm_mul_pd(invargscale1,intpart));
 +
 +    z2        = _mm_mul_pd(z,z);
 +
 +    PolyQ     = _mm_macc_pd(Q3,z2,Q2);
 +    PolyP     = _mm_macc_pd(P2,z2,P1);
 +    PolyQ     = _mm_macc_pd(PolyQ,z2,Q1);
 +
 +    PolyP     = _mm_macc_pd(PolyP,z2,one);
 +    PolyQ     = _mm_macc_pd(PolyQ,z2,two);
 +
 +    PolyP     = _mm_mul_pd(PolyP,z);
 +
 +    z         = _mm_mul_pd(PolyP,gmx_mm_inv_pd(_mm_sub_pd(PolyQ,PolyP)));
 +    z         = _mm_macc_pd(two,z,one);
 +
 +    z         = _mm_mul_pd(z,fexppart);
 +
 +    return  z;
 +}
 +
 +
 +
 +static __m128d
 +gmx_mm_log_pd(__m128d x)
 +{
 +    /* Same algorithm as cephes library */
 +    const __m128d expmask    = gmx_mm_castsi128_pd( _mm_set_epi32(0x7FF00000, 0x00000000, 0x7FF00000, 0x00000000) );
 +
 +    const __m128i expbase_m1 = _mm_set1_epi32(1023-1); /* We want non-IEEE format */
 +
 +    const __m128d half       = _mm_set1_pd(0.5);
 +    const __m128d one        = _mm_set1_pd(1.0);
 +    const __m128d two        = _mm_set1_pd(2.0);
 +    const __m128d invsq2     = _mm_set1_pd(1.0/sqrt(2.0));
 +
 +    const __m128d corr1      = _mm_set1_pd(-2.121944400546905827679e-4);
 +    const __m128d corr2      = _mm_set1_pd(0.693359375);
 +
 +    const __m128d P5         = _mm_set1_pd(1.01875663804580931796e-4);
 +    const __m128d P4         = _mm_set1_pd(4.97494994976747001425e-1);
 +    const __m128d P3         = _mm_set1_pd(4.70579119878881725854e0);
 +    const __m128d P2         = _mm_set1_pd(1.44989225341610930846e1);
 +    const __m128d P1         = _mm_set1_pd(1.79368678507819816313e1);
 +    const __m128d P0         = _mm_set1_pd(7.70838733755885391666e0);
 +
 +    const __m128d Q4         = _mm_set1_pd(1.12873587189167450590e1);
 +    const __m128d Q3         = _mm_set1_pd(4.52279145837532221105e1);
 +    const __m128d Q2         = _mm_set1_pd(8.29875266912776603211e1);
 +    const __m128d Q1         = _mm_set1_pd(7.11544750618563894466e1);
 +    const __m128d Q0         = _mm_set1_pd(2.31251620126765340583e1);
 +
 +    const __m128d R2         = _mm_set1_pd(-7.89580278884799154124e-1);
 +    const __m128d R1         = _mm_set1_pd(1.63866645699558079767e1);
 +    const __m128d R0         = _mm_set1_pd(-6.41409952958715622951e1);
 +
 +    const __m128d S2         = _mm_set1_pd(-3.56722798256324312549E1);
 +    const __m128d S1         = _mm_set1_pd(3.12093766372244180303E2);
 +    const __m128d S0         = _mm_set1_pd(-7.69691943550460008604E2);
 +
 +    __m128d fexp;
 +    __m128i iexp;
 +
 +    __m128d mask1,mask2;
 +    __m128d corr,t1,t2,q;
 +    __m128d zA,yA,xA,zB,yB,xB,z;
 +    __m128d polyR,polyS;
 +    __m128d polyP1,polyP2,polyQ1,polyQ2;
 +
 +    /* Separate x into exponent and mantissa, with a mantissa in the range [0.5..1[ (not IEEE754 standard!) */
 +    fexp   = _mm_and_pd(x,expmask);
 +    iexp   = gmx_mm_castpd_si128(fexp);
 +    iexp   = _mm_srli_epi64(iexp,52);
 +    iexp   = _mm_sub_epi32(iexp,expbase_m1);
 +    iexp   = _mm_shuffle_epi32(iexp, _MM_SHUFFLE(1,1,2,0) );
 +    fexp   = _mm_cvtepi32_pd(iexp);
 +
 +    x      = _mm_andnot_pd(expmask,x);
 +    x      = _mm_or_pd(x,one);
 +    x      = _mm_mul_pd(x,half);
 +
 +    mask1     = _mm_cmpgt_pd(gmx_mm_abs_pd(fexp),two);
 +    mask2     = _mm_cmplt_pd(x,invsq2);
 +
 +    fexp   = _mm_sub_pd(fexp,_mm_and_pd(mask2,one));
 +
 +    /* If mask1 is set ('A') */
 +    zA     = _mm_sub_pd(x,half);
 +    t1     = _mm_blendv_pd( zA,x,mask2 );
 +    zA     = _mm_sub_pd(t1,half);
 +    t2     = _mm_blendv_pd( x,zA,mask2 );
 +    yA     = _mm_mul_pd(half,_mm_add_pd(t2,one));
 +
 +    xA     = _mm_mul_pd(zA,gmx_mm_inv_pd(yA));
 +    zA     = _mm_mul_pd(xA,xA);
 +
 +    /* EVALUATE POLY */
 +    polyR  = _mm_macc_pd(R2,zA,R1);
 +    polyR  = _mm_macc_pd(polyR,zA,R0);
 +
 +    polyS  = _mm_add_pd(zA,S2);
 +    polyS  = _mm_macc_pd(polyS,zA,S1);
 +    polyS  = _mm_macc_pd(polyS,zA,S0);
 +
 +    q      = _mm_mul_pd(polyR,gmx_mm_inv_pd(polyS));
 +    zA     = _mm_mul_pd(_mm_mul_pd(xA,zA),q);
 +
 +    zA     = _mm_macc_pd(corr1,fexp,zA);
 +    zA     = _mm_add_pd(zA,xA);
 +    zA     = _mm_macc_pd(corr2,fexp,zA);
 +
 +    /* If mask1 is not set ('B') */
 +    corr   = _mm_and_pd(mask2,x);
 +    xB     = _mm_add_pd(x,corr);
 +    xB     = _mm_sub_pd(xB,one);
 +    zB     = _mm_mul_pd(xB,xB);
 +
 +    polyP1 = _mm_macc_pd(P5,zB,P3);
 +    polyP2 = _mm_macc_pd(P4,zB,P2);
 +    polyP1 = _mm_macc_pd(polyP1,zB,P1);
 +    polyP2 = _mm_macc_pd(polyP2,zB,P0);
 +    polyP1 = _mm_macc_pd(polyP1,xB,polyP2);
 +
 +    polyQ2 = _mm_macc_pd(Q4,zB,Q2);
 +    polyQ1 = _mm_add_pd(zB,Q3);
 +    polyQ1 = _mm_macc_pd(polyQ1,zB,Q1);
 +    polyQ2 = _mm_macc_pd(polyQ2,zB,Q0);
 +    polyQ1 = _mm_macc_pd(polyQ1,xB,polyQ2);
 +
 +    fexp   = _mm_and_pd(fexp,_mm_cmpneq_pd(fexp,_mm_setzero_pd()));
 +
 +    q      = _mm_mul_pd(polyP1,gmx_mm_inv_pd(polyQ1));
 +    yB     = _mm_macc_pd(_mm_mul_pd(xB,zB),q,_mm_mul_pd(corr1,fexp));
 +
 +    yB     = _mm_nmacc_pd(half,zB,yB);
 +    zB     = _mm_add_pd(xB,yB);
 +    zB     = _mm_macc_pd(corr2,fexp,zB);
 +
 +    z      = _mm_blendv_pd( zB,zA,mask1 );
 +
 +    return z;
 +}
 +
 +
 +
 +static __m128d
 +gmx_mm_erf_pd(__m128d x)
 +{
 +    /* Coefficients for minimax approximation of erf(x)=x*(CAoffset + P(x^2)/Q(x^2)) in range [-0.75,0.75] */
 +    const __m128d CAP4      = _mm_set1_pd(-0.431780540597889301512e-4);
 +    const __m128d CAP3      = _mm_set1_pd(-0.00578562306260059236059);
 +    const __m128d CAP2      = _mm_set1_pd(-0.028593586920219752446);
 +    const __m128d CAP1      = _mm_set1_pd(-0.315924962948621698209);
 +    const __m128d CAP0      = _mm_set1_pd(0.14952975608477029151);
 +
 +    const __m128d CAQ5      = _mm_set1_pd(-0.374089300177174709737e-5);
 +    const __m128d CAQ4      = _mm_set1_pd(0.00015126584532155383535);
 +    const __m128d CAQ3      = _mm_set1_pd(0.00536692680669480725423);
 +    const __m128d CAQ2      = _mm_set1_pd(0.0668686825594046122636);
 +    const __m128d CAQ1      = _mm_set1_pd(0.402604990869284362773);
 +    /* CAQ0 == 1.0 */
 +    const __m128d CAoffset  = _mm_set1_pd(0.9788494110107421875);
 +
 +    /* Coefficients for minimax approximation of erfc(x)=exp(-x^2)*x*(P(x-1)/Q(x-1)) in range [1.0,4.5] */
 +    const __m128d CBP6      = _mm_set1_pd(2.49650423685462752497647637088e-10);
 +    const __m128d CBP5      = _mm_set1_pd(0.00119770193298159629350136085658);
 +    const __m128d CBP4      = _mm_set1_pd(0.0164944422378370965881008942733);
 +    const __m128d CBP3      = _mm_set1_pd(0.0984581468691775932063932439252);
 +    const __m128d CBP2      = _mm_set1_pd(0.317364595806937763843589437418);
 +    const __m128d CBP1      = _mm_set1_pd(0.554167062641455850932670067075);
 +    const __m128d CBP0      = _mm_set1_pd(0.427583576155807163756925301060);
 +    const __m128d CBQ7      = _mm_set1_pd(0.00212288829699830145976198384930);
 +    const __m128d CBQ6      = _mm_set1_pd(0.0334810979522685300554606393425);
 +    const __m128d CBQ5      = _mm_set1_pd(0.2361713785181450957579508850717);
 +    const __m128d CBQ4      = _mm_set1_pd(0.955364736493055670530981883072);
 +    const __m128d CBQ3      = _mm_set1_pd(2.36815675631420037315349279199);
 +    const __m128d CBQ2      = _mm_set1_pd(3.55261649184083035537184223542);
 +    const __m128d CBQ1      = _mm_set1_pd(2.93501136050160872574376997993);
 +    /* CBQ0 == 1.0 */
 +
 +    /* Coefficients for minimax approximation of erfc(x)=exp(-x^2)/x*(P(1/x)/Q(1/x)) in range [4.5,inf] */
 +    const __m128d CCP6      = _mm_set1_pd(-2.8175401114513378771);
 +    const __m128d CCP5      = _mm_set1_pd(-3.22729451764143718517);
 +    const __m128d CCP4      = _mm_set1_pd(-2.5518551727311523996);
 +    const __m128d CCP3      = _mm_set1_pd(-0.687717681153649930619);
 +    const __m128d CCP2      = _mm_set1_pd(-0.212652252872804219852);
 +    const __m128d CCP1      = _mm_set1_pd(0.0175389834052493308818);
 +    const __m128d CCP0      = _mm_set1_pd(0.00628057170626964891937);
 +
 +    const __m128d CCQ6      = _mm_set1_pd(5.48409182238641741584);
 +    const __m128d CCQ5      = _mm_set1_pd(13.5064170191802889145);
 +    const __m128d CCQ4      = _mm_set1_pd(22.9367376522880577224);
 +    const __m128d CCQ3      = _mm_set1_pd(15.930646027911794143);
 +    const __m128d CCQ2      = _mm_set1_pd(11.0567237927800161565);
 +    const __m128d CCQ1      = _mm_set1_pd(2.79257750980575282228);
 +    /* CCQ0 == 1.0 */
 +    const __m128d CCoffset  = _mm_set1_pd(0.5579090118408203125);
 +
 +    const __m128d one       = _mm_set1_pd(1.0);
 +    const __m128d two       = _mm_set1_pd(2.0);
 +
 +    const __m128d signbit   = gmx_mm_castsi128_pd( _mm_set_epi32(0x80000000,0x00000000,0x80000000,0x00000000) );
 +
 +    __m128d xabs,x2,x4,t,t2,w,w2;
 +    __m128d PolyAP0,PolyAP1,PolyAQ0,PolyAQ1;
 +    __m128d PolyBP0,PolyBP1,PolyBQ0,PolyBQ1;
 +    __m128d PolyCP0,PolyCP1,PolyCQ0,PolyCQ1;
 +    __m128d res_erf,res_erfcB,res_erfcC,res_erfc,res;
 +    __m128d mask,expmx2;
 +
 +    /* Calculate erf() */
 +    xabs     = gmx_mm_abs_pd(x);
 +    x2       = _mm_mul_pd(x,x);
 +    x4       = _mm_mul_pd(x2,x2);
 +
 +    PolyAP0  = _mm_macc_pd(CAP4,x4,CAP2);
 +    PolyAP1  = _mm_macc_pd(CAP3,x4,CAP1);
 +    PolyAP0  = _mm_macc_pd(PolyAP0,x4,CAP0);
 +    PolyAP0  = _mm_macc_pd(PolyAP1,x2,PolyAP0);
 +
 +    PolyAQ1  = _mm_macc_pd(CAQ5,x4,CAQ3);
 +    PolyAQ0  = _mm_macc_pd(CAQ4,x4,CAQ2);
 +    PolyAQ1  = _mm_macc_pd(PolyAQ1,x4,CAQ1);
 +    PolyAQ0  = _mm_macc_pd(PolyAQ0,x4,one);
 +    PolyAQ0  = _mm_macc_pd(PolyAQ1,x2,PolyAQ0);
 +
 +    res_erf  = _mm_macc_pd(PolyAP0,gmx_mm_inv_pd(PolyAQ0),CAoffset);
 +    res_erf  = _mm_mul_pd(x,res_erf);
 +
 +    /* Calculate erfc() in range [1,4.5] */
 +    t       = _mm_sub_pd(xabs,one);
 +    t2      = _mm_mul_pd(t,t);
 +
 +    PolyBP0  = _mm_macc_pd(CBP6,t2,CBP4);
 +    PolyBP1  = _mm_macc_pd(CBP5,t2,CBP3);
 +    PolyBP0  = _mm_macc_pd(PolyBP0,t2,CBP2);
 +    PolyBP1  = _mm_macc_pd(PolyBP1,t2,CBP1);
 +    PolyBP0  = _mm_macc_pd(PolyBP0,t2,CBP0);
 +    PolyBP0  = _mm_macc_pd(PolyBP1,t,PolyBP0);
 +
 +    PolyBQ1 = _mm_macc_pd(CBQ7,t2,CBQ5);
 +    PolyBQ0 = _mm_macc_pd(CBQ6,t2,CBQ4);
 +    PolyBQ1 = _mm_macc_pd(PolyBQ1,t2,CBQ3);
 +    PolyBQ0 = _mm_macc_pd(PolyBQ0,t2,CBQ2);
 +    PolyBQ1 = _mm_macc_pd(PolyBQ1,t2,CBQ1);
 +    PolyBQ0 = _mm_macc_pd(PolyBQ0,t2,one);
 +    PolyBQ0 = _mm_macc_pd(PolyBQ1,t,PolyBQ0);
 +
 +    res_erfcB = _mm_mul_pd(PolyBP0,gmx_mm_inv_pd(PolyBQ0));
 +
 +    res_erfcB = _mm_mul_pd(res_erfcB,xabs);
 +
 +    /* Calculate erfc() in range [4.5,inf] */
 +    w       = gmx_mm_inv_pd(xabs);
 +    w2      = _mm_mul_pd(w,w);
 +
 +    PolyCP0  = _mm_macc_pd(CCP6,w2,CCP4);
 +    PolyCP1  = _mm_macc_pd(CCP5,w2,CCP3);
 +    PolyCP0  = _mm_macc_pd(PolyCP0,w2,CCP2);
 +    PolyCP1  = _mm_macc_pd(PolyCP1,w2,CCP1);
 +    PolyCP0  = _mm_macc_pd(PolyCP0,w2,CCP0);
 +    PolyCP0  = _mm_macc_pd(PolyCP1,w,PolyCP0);
 +
 +    PolyCQ0  = _mm_macc_pd(CCQ6,w2,CCQ4);
 +    PolyCQ1  = _mm_macc_pd(CCQ5,w2,CCQ3);
 +    PolyCQ0  = _mm_macc_pd(PolyCQ0,w2,CCQ2);
 +    PolyCQ1  = _mm_macc_pd(PolyCQ1,w2,CCQ1);
 +    PolyCQ0  = _mm_macc_pd(PolyCQ0,w2,one);
 +    PolyCQ0  = _mm_macc_pd(PolyCQ1,w,PolyCQ0);
 +
 +    expmx2   = gmx_mm_exp_pd( _mm_or_pd(signbit, x2) );
 +
 +    res_erfcC = _mm_macc_pd(PolyCP0,gmx_mm_inv_pd(PolyCQ0),CCoffset);
 +    res_erfcC = _mm_mul_pd(res_erfcC,w);
 +
 +    mask = _mm_cmpgt_pd(xabs,_mm_set1_pd(4.5));
 +    res_erfc = _mm_blendv_pd(res_erfcB,res_erfcC,mask);
 +
 +    res_erfc = _mm_mul_pd(res_erfc,expmx2);
 +
 +    /* erfc(x<0) = 2-erfc(|x|) */
 +    mask = _mm_cmplt_pd(x,_mm_setzero_pd());
 +    res_erfc = _mm_blendv_pd(res_erfc,_mm_sub_pd(two,res_erfc),mask);
 +
 +    /* Select erf() or erfc() */
 +    mask = _mm_cmplt_pd(xabs,one);
 +    res  = _mm_blendv_pd(_mm_sub_pd(one,res_erfc),res_erf,mask);
 +
 +    return res;
 +}
 +
 +
 +static __m128d
 +gmx_mm_erfc_pd(__m128d x)
 +{
 +    /* Coefficients for minimax approximation of erf(x)=x*(CAoffset + P(x^2)/Q(x^2)) in range [-0.75,0.75] */
 +    const __m128d CAP4      = _mm_set1_pd(-0.431780540597889301512e-4);
 +    const __m128d CAP3      = _mm_set1_pd(-0.00578562306260059236059);
 +    const __m128d CAP2      = _mm_set1_pd(-0.028593586920219752446);
 +    const __m128d CAP1      = _mm_set1_pd(-0.315924962948621698209);
 +    const __m128d CAP0      = _mm_set1_pd(0.14952975608477029151);
 +
 +    const __m128d CAQ5      = _mm_set1_pd(-0.374089300177174709737e-5);
 +    const __m128d CAQ4      = _mm_set1_pd(0.00015126584532155383535);
 +    const __m128d CAQ3      = _mm_set1_pd(0.00536692680669480725423);
 +    const __m128d CAQ2      = _mm_set1_pd(0.0668686825594046122636);
 +    const __m128d CAQ1      = _mm_set1_pd(0.402604990869284362773);
 +    /* CAQ0 == 1.0 */
 +    const __m128d CAoffset  = _mm_set1_pd(0.9788494110107421875);
 +
 +    /* Coefficients for minimax approximation of erfc(x)=exp(-x^2)*x*(P(x-1)/Q(x-1)) in range [1.0,4.5] */
 +    const __m128d CBP6      = _mm_set1_pd(2.49650423685462752497647637088e-10);
 +    const __m128d CBP5      = _mm_set1_pd(0.00119770193298159629350136085658);
 +    const __m128d CBP4      = _mm_set1_pd(0.0164944422378370965881008942733);
 +    const __m128d CBP3      = _mm_set1_pd(0.0984581468691775932063932439252);
 +    const __m128d CBP2      = _mm_set1_pd(0.317364595806937763843589437418);
 +    const __m128d CBP1      = _mm_set1_pd(0.554167062641455850932670067075);
 +    const __m128d CBP0      = _mm_set1_pd(0.427583576155807163756925301060);
 +    const __m128d CBQ7      = _mm_set1_pd(0.00212288829699830145976198384930);
 +    const __m128d CBQ6      = _mm_set1_pd(0.0334810979522685300554606393425);
 +    const __m128d CBQ5      = _mm_set1_pd(0.2361713785181450957579508850717);
 +    const __m128d CBQ4      = _mm_set1_pd(0.955364736493055670530981883072);
 +    const __m128d CBQ3      = _mm_set1_pd(2.36815675631420037315349279199);
 +    const __m128d CBQ2      = _mm_set1_pd(3.55261649184083035537184223542);
 +    const __m128d CBQ1      = _mm_set1_pd(2.93501136050160872574376997993);
 +    /* CBQ0 == 1.0 */
 +
 +    /* Coefficients for minimax approximation of erfc(x)=exp(-x^2)/x*(P(1/x)/Q(1/x)) in range [4.5,inf] */
 +    const __m128d CCP6      = _mm_set1_pd(-2.8175401114513378771);
 +    const __m128d CCP5      = _mm_set1_pd(-3.22729451764143718517);
 +    const __m128d CCP4      = _mm_set1_pd(-2.5518551727311523996);
 +    const __m128d CCP3      = _mm_set1_pd(-0.687717681153649930619);
 +    const __m128d CCP2      = _mm_set1_pd(-0.212652252872804219852);
 +    const __m128d CCP1      = _mm_set1_pd(0.0175389834052493308818);
 +    const __m128d CCP0      = _mm_set1_pd(0.00628057170626964891937);
 +
 +    const __m128d CCQ6      = _mm_set1_pd(5.48409182238641741584);
 +    const __m128d CCQ5      = _mm_set1_pd(13.5064170191802889145);
 +    const __m128d CCQ4      = _mm_set1_pd(22.9367376522880577224);
 +    const __m128d CCQ3      = _mm_set1_pd(15.930646027911794143);
 +    const __m128d CCQ2      = _mm_set1_pd(11.0567237927800161565);
 +    const __m128d CCQ1      = _mm_set1_pd(2.79257750980575282228);
 +    /* CCQ0 == 1.0 */
 +    const __m128d CCoffset  = _mm_set1_pd(0.5579090118408203125);
 +
 +    const __m128d one       = _mm_set1_pd(1.0);
 +    const __m128d two       = _mm_set1_pd(2.0);
 +
 +    const __m128d signbit   = gmx_mm_castsi128_pd( _mm_set_epi32(0x80000000,0x00000000,0x80000000,0x00000000) );
 +
 +    __m128d xabs,x2,x4,t,t2,w,w2;
 +    __m128d PolyAP0,PolyAP1,PolyAQ0,PolyAQ1;
 +    __m128d PolyBP0,PolyBP1,PolyBQ0,PolyBQ1;
 +    __m128d PolyCP0,PolyCP1,PolyCQ0,PolyCQ1;
 +    __m128d res_erf,res_erfcB,res_erfcC,res_erfc,res;
 +    __m128d mask,expmx2;
 +
 +    /* Calculate erf() */
 +    xabs     = gmx_mm_abs_pd(x);
 +    x2       = _mm_mul_pd(x,x);
 +    x4       = _mm_mul_pd(x2,x2);
 +
 +    PolyAP0  = _mm_macc_pd(CAP4,x4,CAP2);
 +    PolyAP1  = _mm_macc_pd(CAP3,x4,CAP1);
 +    PolyAP0  = _mm_macc_pd(PolyAP0,x4,CAP0);
 +    PolyAP0  = _mm_macc_pd(PolyAP1,x2,PolyAP0);
 +
 +    PolyAQ1  = _mm_macc_pd(CAQ5,x4,CAQ3);
 +    PolyAQ0  = _mm_macc_pd(CAQ4,x4,CAQ2);
 +    PolyAQ1  = _mm_macc_pd(PolyAQ1,x4,CAQ1);
 +    PolyAQ0  = _mm_macc_pd(PolyAQ0,x4,one);
 +    PolyAQ0  = _mm_macc_pd(PolyAQ1,x2,PolyAQ0);
 +
 +    res_erf  = _mm_macc_pd(PolyAP0,gmx_mm_inv_pd(PolyAQ0),CAoffset);
 +    res_erf  = _mm_mul_pd(x,res_erf);
 +
 +    /* Calculate erfc() in range [1,4.5] */
 +    t       = _mm_sub_pd(xabs,one);
 +    t2      = _mm_mul_pd(t,t);
 +
 +    PolyBP0  = _mm_macc_pd(CBP6,t2,CBP4);
 +    PolyBP1  = _mm_macc_pd(CBP5,t2,CBP3);
 +    PolyBP0  = _mm_macc_pd(PolyBP0,t2,CBP2);
 +    PolyBP1  = _mm_macc_pd(PolyBP1,t2,CBP1);
 +    PolyBP0  = _mm_macc_pd(PolyBP0,t2,CBP0);
 +    PolyBP0  = _mm_macc_pd(PolyBP1,t,PolyBP0);
 +
 +    PolyBQ1 = _mm_macc_pd(CBQ7,t2,CBQ5);
 +    PolyBQ0 = _mm_macc_pd(CBQ6,t2,CBQ4);
 +    PolyBQ1 = _mm_macc_pd(PolyBQ1,t2,CBQ3);
 +    PolyBQ0 = _mm_macc_pd(PolyBQ0,t2,CBQ2);
 +    PolyBQ1 = _mm_macc_pd(PolyBQ1,t2,CBQ1);
 +    PolyBQ0 = _mm_macc_pd(PolyBQ0,t2,one);
 +    PolyBQ0 = _mm_macc_pd(PolyBQ1,t,PolyBQ0);
 +
 +    res_erfcB = _mm_mul_pd(PolyBP0,gmx_mm_inv_pd(PolyBQ0));
 +
 +    res_erfcB = _mm_mul_pd(res_erfcB,xabs);
 +
 +    /* Calculate erfc() in range [4.5,inf] */
 +    w       = gmx_mm_inv_pd(xabs);
 +    w2      = _mm_mul_pd(w,w);
 +
 +    PolyCP0  = _mm_macc_pd(CCP6,w2,CCP4);
 +    PolyCP1  = _mm_macc_pd(CCP5,w2,CCP3);
 +    PolyCP0  = _mm_macc_pd(PolyCP0,w2,CCP2);
 +    PolyCP1  = _mm_macc_pd(PolyCP1,w2,CCP1);
 +    PolyCP0  = _mm_macc_pd(PolyCP0,w2,CCP0);
 +    PolyCP0  = _mm_macc_pd(PolyCP1,w,PolyCP0);
 +
 +    PolyCQ0  = _mm_macc_pd(CCQ6,w2,CCQ4);
 +    PolyCQ1  = _mm_macc_pd(CCQ5,w2,CCQ3);
 +    PolyCQ0  = _mm_macc_pd(PolyCQ0,w2,CCQ2);
 +    PolyCQ1  = _mm_macc_pd(PolyCQ1,w2,CCQ1);
 +    PolyCQ0  = _mm_macc_pd(PolyCQ0,w2,one);
 +    PolyCQ0  = _mm_macc_pd(PolyCQ1,w,PolyCQ0);
 +
 +    expmx2   = gmx_mm_exp_pd( _mm_or_pd(signbit, x2) );
 +
 +    res_erfcC = _mm_macc_pd(PolyCP0,gmx_mm_inv_pd(PolyCQ0),CCoffset);
 +    res_erfcC = _mm_mul_pd(res_erfcC,w);
 +
 +    mask = _mm_cmpgt_pd(xabs,_mm_set1_pd(4.5));
 +    res_erfc = _mm_blendv_pd(res_erfcB,res_erfcC,mask);
 +
 +    res_erfc = _mm_mul_pd(res_erfc,expmx2);
 +
 +    /* erfc(x<0) = 2-erfc(|x|) */
 +    mask = _mm_cmplt_pd(x,_mm_setzero_pd());
 +    res_erfc = _mm_blendv_pd(res_erfc,_mm_sub_pd(two,res_erfc),mask);
 +
 +    /* Select erf() or erfc() */
 +    mask = _mm_cmplt_pd(xabs,one);
 +    res  = _mm_blendv_pd(res_erfc,_mm_sub_pd(one,res_erf),mask);
 +
 +    return res;
 +}
 +
 +
 +
 +/* Calculate the force correction due to PME analytically.
 + *
 + * This routine is meant to enable analytical evaluation of the
 + * direct-space PME electrostatic force to avoid tables.
 + *
 + * The direct-space potential should be Erfc(beta*r)/r, but there
 + * are some problems evaluating that:
 + *
 + * First, the error function is difficult (read: expensive) to
 + * approxmiate accurately for intermediate to large arguments, and
 + * this happens already in ranges of beta*r that occur in simulations.
 + * Second, we now try to avoid calculating potentials in Gromacs but
 + * use forces directly.
 + *
 + * We can simply things slight by noting that the PME part is really
 + * a correction to the normal Coulomb force since Erfc(z)=1-Erf(z), i.e.
 + *
 + * V= 1/r - Erf(beta*r)/r
 + *
 + * The first term we already have from the inverse square root, so
 + * that we can leave out of this routine.
 + *
 + * For pme tolerances of 1e-3 to 1e-8 and cutoffs of 0.5nm to 1.8nm,
 + * the argument beta*r will be in the range 0.15 to ~4. Use your
 + * favorite plotting program to realize how well-behaved Erf(z)/z is
 + * in this range!
 + *
 + * We approximate f(z)=erf(z)/z with a rational minimax polynomial.
 + * However, it turns out it is more efficient to approximate f(z)/z and
 + * then only use even powers. This is another minor optimization, since
 + * we actually WANT f(z)/z, because it is going to be multiplied by
 + * the vector between the two atoms to get the vectorial force. The
 + * fastest flops are the ones we can avoid calculating!
 + *
 + * So, here's how it should be used:
 + *
 + * 1. Calculate r^2.
 + * 2. Multiply by beta^2, so you get z^2=beta^2*r^2.
 + * 3. Evaluate this routine with z^2 as the argument.
 + * 4. The return value is the expression:
 + *
 + *
 + *       2*exp(-z^2)     erf(z)
 + *       ------------ - --------
 + *       sqrt(Pi)*z^2      z^3
 + *
 + * 5. Multiply the entire expression by beta^3. This will get you
 + *
 + *       beta^3*2*exp(-z^2)     beta^3*erf(z)
 + *       ------------------  - ---------------
 + *          sqrt(Pi)*z^2            z^3
 + *
 + *    or, switching back to r (z=r*beta):
 + *
 + *       2*beta*exp(-r^2*beta^2)   erf(r*beta)
 + *       ----------------------- - -----------
 + *            sqrt(Pi)*r^2            r^3
 + *
 + *
 + *    With a bit of math exercise you should be able to confirm that
 + *    this is exactly D[Erf[beta*r]/r,r] divided by r another time.
 + *
 + * 6. Add the result to 1/r^3, multiply by the product of the charges,
 + *    and you have your force (divided by r). A final multiplication
 + *    with the vector connecting the two particles and you have your
 + *    vectorial force to add to the particles.
 + *
 + */
 +static __m128d
 +gmx_mm_pmecorrF_pd(__m128d z2)
 +{
 +    const __m128d  FN10     = _mm_set1_pd(-8.0072854618360083154e-14);
 +    const __m128d  FN9      = _mm_set1_pd(1.1859116242260148027e-11);
 +    const __m128d  FN8      = _mm_set1_pd(-8.1490406329798423616e-10);
 +    const __m128d  FN7      = _mm_set1_pd(3.4404793543907847655e-8);
 +    const __m128d  FN6      = _mm_set1_pd(-9.9471420832602741006e-7);
 +    const __m128d  FN5      = _mm_set1_pd(0.000020740315999115847456);
 +    const __m128d  FN4      = _mm_set1_pd(-0.00031991745139313364005);
 +    const __m128d  FN3      = _mm_set1_pd(0.0035074449373659008203);
 +    const __m128d  FN2      = _mm_set1_pd(-0.031750380176100813405);
 +    const __m128d  FN1      = _mm_set1_pd(0.13884101728898463426);
 +    const __m128d  FN0      = _mm_set1_pd(-0.75225277815249618847);
 +    
 +    const __m128d  FD5      = _mm_set1_pd(0.000016009278224355026701);
 +    const __m128d  FD4      = _mm_set1_pd(0.00051055686934806966046);
 +    const __m128d  FD3      = _mm_set1_pd(0.0081803507497974289008);
 +    const __m128d  FD2      = _mm_set1_pd(0.077181146026670287235);
 +    const __m128d  FD1      = _mm_set1_pd(0.41543303143712535988);
 +    const __m128d  FD0      = _mm_set1_pd(1.0);
 +    
 +    __m128d z4;
 +    __m128d polyFN0,polyFN1,polyFD0,polyFD1;
 +    
 +    z4             = _mm_mul_pd(z2,z2);
 +    
 +    polyFD1        = _mm_macc_pd(FD5,z4,FD3);
 +    polyFD1        = _mm_macc_pd(polyFD1,z4,FD1);
 +    polyFD1        = _mm_mul_pd(polyFD1,z2);
 +    polyFD0        = _mm_macc_pd(FD4,z4,FD2);
 +    polyFD0        = _mm_macc_pd(polyFD0,z4,FD0);
 +    polyFD0        = _mm_add_pd(polyFD0,polyFD1);
 +    
 +    polyFD0        = gmx_mm_inv_pd(polyFD0);
 +    
 +    polyFN0        = _mm_macc_pd(FN10,z4,FN8);
 +    polyFN0        = _mm_macc_pd(polyFN0,z4,FN6);
 +    polyFN0        = _mm_macc_pd(polyFN0,z4,FN4);
 +    polyFN0        = _mm_macc_pd(polyFN0,z4,FN2);
 +    polyFN0        = _mm_macc_pd(polyFN0,z4,FN0);
 +    polyFN1        = _mm_macc_pd(FN9,z4,FN7);
 +    polyFN1        = _mm_macc_pd(polyFN1,z4,FN5);
 +    polyFN1        = _mm_macc_pd(polyFN1,z4,FN3);
 +    polyFN1        = _mm_macc_pd(polyFN1,z4,FN1);
 +    polyFN0        = _mm_macc_pd(polyFN1,z2,polyFN0);
 +    
 +    return   _mm_mul_pd(polyFN0,polyFD0);
 +}
 +
 +
 +/* Calculate the potential correction due to PME analytically.
 + *
 + * This routine calculates Erf(z)/z, although you should provide z^2
 + * as the input argument.
 + *
 + * Here's how it should be used:
 + *
 + * 1. Calculate r^2.
 + * 2. Multiply by beta^2, so you get z^2=beta^2*r^2.
 + * 3. Evaluate this routine with z^2 as the argument.
 + * 4. The return value is the expression:
 + *
 + *
 + *        erf(z)
 + *       --------
 + *          z
 + *
 + * 5. Multiply the entire expression by beta and switching back to r (z=r*beta):
 + *
 + *       erf(r*beta)
 + *       -----------
 + *           r
 + *
 + * 6. Subtract the result from 1/r, multiply by the product of the charges,
 + *    and you have your potential.
 + *
 + */
 +static __m128d
 +gmx_mm_pmecorrV_pd(__m128d z2)
 +{
 +    const __m128d  VN9      = _mm_set1_pd(-9.3723776169321855475e-13);
 +    const __m128d  VN8      = _mm_set1_pd(1.2280156762674215741e-10);
 +    const __m128d  VN7      = _mm_set1_pd(-7.3562157912251309487e-9);
 +    const __m128d  VN6      = _mm_set1_pd(2.6215886208032517509e-7);
 +    const __m128d  VN5      = _mm_set1_pd(-4.9532491651265819499e-6);
 +    const __m128d  VN4      = _mm_set1_pd(0.00025907400778966060389);
 +    const __m128d  VN3      = _mm_set1_pd(0.0010585044856156469792);
 +    const __m128d  VN2      = _mm_set1_pd(0.045247661136833092885);
 +    const __m128d  VN1      = _mm_set1_pd(0.11643931522926034421);
 +    const __m128d  VN0      = _mm_set1_pd(1.1283791671726767970);
 +    
 +    const __m128d  VD5      = _mm_set1_pd(0.000021784709867336150342);
 +    const __m128d  VD4      = _mm_set1_pd(0.00064293662010911388448);
 +    const __m128d  VD3      = _mm_set1_pd(0.0096311444822588683504);
 +    const __m128d  VD2      = _mm_set1_pd(0.085608012351550627051);
 +    const __m128d  VD1      = _mm_set1_pd(0.43652499166614811084);
 +    const __m128d  VD0      = _mm_set1_pd(1.0);
 +    
 +    __m128d z4;
 +    __m128d polyVN0,polyVN1,polyVD0,polyVD1;
 +    
 +    z4             = _mm_mul_pd(z2,z2);
 +    
 +    polyVD1        = _mm_macc_pd(VD5,z4,VD3);
 +    polyVD0        = _mm_macc_pd(VD4,z4,VD2);
 +    polyVD1        = _mm_macc_pd(polyVD1,z4,VD1);
 +    polyVD0        = _mm_macc_pd(polyVD0,z4,VD0);
 +    polyVD0        = _mm_macc_pd(polyVD1,z2,polyVD0);
 +    
 +    polyVD0        = gmx_mm_inv_pd(polyVD0);
 +    
 +    polyVN1        = _mm_macc_pd(VN9,z4,VN7);
 +    polyVN0        = _mm_macc_pd(VN8,z4,VN6);
 +    polyVN1        = _mm_macc_pd(polyVN1,z4,VN5);
 +    polyVN0        = _mm_macc_pd(polyVN0,z4,VN4);
 +    polyVN1        = _mm_macc_pd(polyVN1,z4,VN3);
 +    polyVN0        = _mm_macc_pd(polyVN0,z4,VN2);
 +    polyVN1        = _mm_macc_pd(polyVN1,z4,VN1);
 +    polyVN0        = _mm_macc_pd(polyVN0,z4,VN0);
 +    polyVN0        = _mm_macc_pd(polyVN1,z2,polyVN0);
 +    
 +    return   _mm_mul_pd(polyVN0,polyVD0);
 +}
 +
 +
 +static int
 +gmx_mm_sincos_pd(__m128d x,
 +                 __m128d *sinval,
 +                 __m128d *cosval)
 +{
 +#ifdef _MSC_VER
 +    __declspec(align(16))
 +    const double sintable[34] =
 +    {
 +        1.00000000000000000e+00 , 0.00000000000000000e+00 ,
 +        9.95184726672196929e-01 , 9.80171403295606036e-02 ,
 +        9.80785280403230431e-01 , 1.95090322016128248e-01 ,
 +        9.56940335732208824e-01 , 2.90284677254462331e-01 ,
 +        9.23879532511286738e-01 , 3.82683432365089782e-01 ,
 +        8.81921264348355050e-01 , 4.71396736825997642e-01 ,
 +        8.31469612302545236e-01 , 5.55570233019602178e-01 ,
 +        7.73010453362736993e-01 , 6.34393284163645488e-01 ,
 +        7.07106781186547573e-01 , 7.07106781186547462e-01 ,
 +        6.34393284163645599e-01 , 7.73010453362736882e-01 ,
 +        5.55570233019602289e-01 , 8.31469612302545125e-01 ,
 +        4.71396736825997809e-01 , 8.81921264348354939e-01 ,
 +        3.82683432365089837e-01 , 9.23879532511286738e-01 ,
 +        2.90284677254462276e-01 , 9.56940335732208935e-01 ,
 +        1.95090322016128304e-01 , 9.80785280403230431e-01 ,
 +        9.80171403295607702e-02 , 9.95184726672196818e-01 ,
 +        0.0 , 1.00000000000000000e+00
 +    };
 +#else
 +    const __m128d sintable[17] =
 +    {
 +        _mm_set_pd( 0.0 , 1.0 ),
 +        _mm_set_pd( sin(  1.0 * (M_PI/2.0) / 16.0) , cos(  1.0 * (M_PI/2.0) / 16.0) ),
 +        _mm_set_pd( sin(  2.0 * (M_PI/2.0) / 16.0) , cos(  2.0 * (M_PI/2.0) / 16.0) ),
 +        _mm_set_pd( sin(  3.0 * (M_PI/2.0) / 16.0) , cos(  3.0 * (M_PI/2.0) / 16.0) ),
 +        _mm_set_pd( sin(  4.0 * (M_PI/2.0) / 16.0) , cos(  4.0 * (M_PI/2.0) / 16.0) ),
 +        _mm_set_pd( sin(  5.0 * (M_PI/2.0) / 16.0) , cos(  5.0 * (M_PI/2.0) / 16.0) ),
 +        _mm_set_pd( sin(  6.0 * (M_PI/2.0) / 16.0) , cos(  6.0 * (M_PI/2.0) / 16.0) ),
 +        _mm_set_pd( sin(  7.0 * (M_PI/2.0) / 16.0) , cos(  7.0 * (M_PI/2.0) / 16.0) ),
 +        _mm_set_pd( sin(  8.0 * (M_PI/2.0) / 16.0) , cos(  8.0 * (M_PI/2.0) / 16.0) ),
 +        _mm_set_pd( sin(  9.0 * (M_PI/2.0) / 16.0) , cos(  9.0 * (M_PI/2.0) / 16.0) ),
 +        _mm_set_pd( sin( 10.0 * (M_PI/2.0) / 16.0) , cos( 10.0 * (M_PI/2.0) / 16.0) ),
 +        _mm_set_pd( sin( 11.0 * (M_PI/2.0) / 16.0) , cos( 11.0 * (M_PI/2.0) / 16.0) ),
 +        _mm_set_pd( sin( 12.0 * (M_PI/2.0) / 16.0) , cos( 12.0 * (M_PI/2.0) / 16.0) ),
 +        _mm_set_pd( sin( 13.0 * (M_PI/2.0) / 16.0) , cos( 13.0 * (M_PI/2.0) / 16.0) ),
 +        _mm_set_pd( sin( 14.0 * (M_PI/2.0) / 16.0) , cos( 14.0 * (M_PI/2.0) / 16.0) ),
 +        _mm_set_pd( sin( 15.0 * (M_PI/2.0) / 16.0) , cos( 15.0 * (M_PI/2.0) / 16.0) ),
 +        _mm_set_pd(  1.0 , 0.0 )
 +    };
 +#endif
 +
 +    const __m128d signmask    = gmx_mm_castsi128_pd( _mm_set_epi32(0x7FFFFFFF,0xFFFFFFFF,0x7FFFFFFF,0xFFFFFFFF) );
 +    const __m128i signbit_epi32  = _mm_set1_epi32(0x80000000);
 +
 +    const __m128d tabscale      = _mm_set1_pd(32.0/M_PI);
 +    const __m128d invtabscale0  = _mm_set1_pd(9.81747508049011230469e-02);
 +    const __m128d invtabscale1  = _mm_set1_pd(1.96197799156550576057e-08);
 +    const __m128i ione          = _mm_set1_epi32(1);
 +    const __m128i i32           = _mm_set1_epi32(32);
 +    const __m128i i16           = _mm_set1_epi32(16);
 +    const __m128i tabmask       = _mm_set1_epi32(0x3F);
 +    const __m128d sinP7         = _mm_set1_pd(-1.0/5040.0);
 +    const __m128d sinP5         = _mm_set1_pd(1.0/120.0);
 +    const __m128d sinP3         = _mm_set1_pd(-1.0/6.0);
 +    const __m128d sinP1         = _mm_set1_pd(1.0);
 +
 +    const __m128d cosP6         = _mm_set1_pd(-1.0/720.0);
 +    const __m128d cosP4         = _mm_set1_pd(1.0/24.0);
 +    const __m128d cosP2         = _mm_set1_pd(-1.0/2.0);
 +    const __m128d cosP0         = _mm_set1_pd(1.0);
 +
 +    __m128d scalex;
 +    __m128i tabidx,corridx;
 +    __m128d xabs,z,z2,polySin,polyCos;
 +    __m128d xpoint;
 +    __m128d ypoint0,ypoint1;
 +
 +    __m128d sinpoint,cospoint;
 +    __m128d xsign,ssign,csign;
 +    __m128i imask,sswapsign,cswapsign;
 +    __m128d minusone;
 +
 +    xsign    = _mm_andnot_pd(signmask,x);
 +    xabs     = _mm_and_pd(x,signmask);
 +
 +    scalex   = _mm_mul_pd(tabscale,xabs);
 +    tabidx   = _mm_cvtpd_epi32(scalex);
 +
 +    xpoint   = _mm_round_pd(scalex,_MM_FROUND_TO_NEAREST_INT);
 +
 +    /* Extended precision arithmetics */
 +    z        = _mm_nmacc_pd(invtabscale0,xpoint,xabs);
 +    z        = _mm_nmacc_pd(invtabscale1,xpoint,z);
 +
 +    /* Range reduction to 0..2*Pi */
 +    tabidx   = _mm_and_si128(tabidx,tabmask);
 +
 +    /* tabidx is now in range [0,..,64] */
 +    imask     = _mm_cmpgt_epi32(tabidx,i32);
 +    sswapsign = imask;
 +    cswapsign = imask;
 +    corridx   = _mm_and_si128(imask,i32);
 +    tabidx    = _mm_sub_epi32(tabidx,corridx);
 +
 +    /* tabidx is now in range [0..32] */
 +    imask     = _mm_cmpgt_epi32(tabidx,i16);
 +    cswapsign = _mm_xor_si128(cswapsign,imask);
 +    corridx   = _mm_sub_epi32(i32,tabidx);
 +    tabidx    = _mm_blendv_epi8(tabidx,corridx,imask);
 +    /* tabidx is now in range [0..16] */
 +    ssign     = _mm_cvtepi32_pd( _mm_or_si128( sswapsign , ione ) );
 +    csign     = _mm_cvtepi32_pd( _mm_or_si128( cswapsign , ione ) );
 +
 +#ifdef _MSC_VER
 +    ypoint0  = _mm_load_pd(sintable + 2*_mm_extract_epi32(tabidx,0));
 +    ypoint1  = _mm_load_pd(sintable + 2*_mm_extract_epi32(tabidx,1));
 +#else
 +    ypoint0  = sintable[_mm_extract_epi32(tabidx,0)];
 +    ypoint1  = sintable[_mm_extract_epi32(tabidx,1)];
 +#endif
 +    sinpoint = _mm_unpackhi_pd(ypoint0,ypoint1);
 +    cospoint = _mm_unpacklo_pd(ypoint0,ypoint1);
 +
 +    sinpoint = _mm_mul_pd(sinpoint,ssign);
 +    cospoint = _mm_mul_pd(cospoint,csign);
 +
 +    z2       = _mm_mul_pd(z,z);
 +
 +    polySin  = _mm_macc_pd(sinP7,z2,sinP5);
 +    polySin  = _mm_macc_pd(polySin,z2,sinP3);
 +    polySin  = _mm_macc_pd(polySin,z2,sinP1);
 +    polySin  = _mm_mul_pd(polySin,z);
 +
 +    polyCos  = _mm_macc_pd(cosP6,z2,cosP4);
 +    polyCos  = _mm_macc_pd(polyCos,z2,cosP2);
 +    polyCos  = _mm_macc_pd(polyCos,z2,cosP0);
 +
 +    *sinval  = _mm_xor_pd(_mm_add_pd( _mm_mul_pd(sinpoint,polyCos) , _mm_mul_pd(cospoint,polySin) ),xsign);
 +    *cosval  = _mm_sub_pd( _mm_mul_pd(cospoint,polyCos) , _mm_mul_pd(sinpoint,polySin) );
 +
 +    return 0;
 +}
 +
 +/*
 + * IMPORTANT: Do NOT call both sin & cos if you need both results, since each of them
 + * will then call the sincos() routine and waste a factor 2 in performance!
 + */
 +static __m128d
 +gmx_mm_sin_pd(__m128d x)
 +{
 +    __m128d s,c;
 +    gmx_mm_sincos_pd(x,&s,&c);
 +    return s;
 +}
 +
 +/*
 + * IMPORTANT: Do NOT call both sin & cos if you need both results, since each of them
 + * will then call the sincos() routine and waste a factor 2 in performance!
 + */
 +static __m128d
 +gmx_mm_cos_pd(__m128d x)
 +{
 +    __m128d s,c;
 +    gmx_mm_sincos_pd(x,&s,&c);
 +    return c;
 +}
 +
 +
 +
 +static __m128d
 +gmx_mm_tan_pd(__m128d x)
 +{
 +    __m128d sinval,cosval;
 +    __m128d tanval;
 +
 +    gmx_mm_sincos_pd(x,&sinval,&cosval);
 +
 +    tanval = _mm_mul_pd(sinval,gmx_mm_inv_pd(cosval));
 +
 +    return tanval;
 +}
 +
 +
 +
 +static __m128d
 +gmx_mm_asin_pd(__m128d x)
 +{
 +    /* Same algorithm as cephes library */
 +    const __m128d signmask  = gmx_mm_castsi128_pd( _mm_set_epi32(0x7FFFFFFF,0xFFFFFFFF,0x7FFFFFFF,0xFFFFFFFF) );
 +    const __m128d limit1    = _mm_set1_pd(0.625);
 +    const __m128d limit2    = _mm_set1_pd(1e-8);
 +    const __m128d one       = _mm_set1_pd(1.0);
 +    const __m128d halfpi    = _mm_set1_pd(M_PI/2.0);
 +    const __m128d quarterpi = _mm_set1_pd(M_PI/4.0);
 +    const __m128d morebits  = _mm_set1_pd(6.123233995736765886130e-17);
 +
 +    const __m128d P5        = _mm_set1_pd(4.253011369004428248960e-3);
 +    const __m128d P4        = _mm_set1_pd(-6.019598008014123785661e-1);
 +    const __m128d P3        = _mm_set1_pd(5.444622390564711410273e0);
 +    const __m128d P2        = _mm_set1_pd(-1.626247967210700244449e1);
 +    const __m128d P1        = _mm_set1_pd(1.956261983317594739197e1);
 +    const __m128d P0        = _mm_set1_pd(-8.198089802484824371615e0);
 +
 +    const __m128d Q4        = _mm_set1_pd(-1.474091372988853791896e1);
 +    const __m128d Q3        = _mm_set1_pd(7.049610280856842141659e1);
 +    const __m128d Q2        = _mm_set1_pd(-1.471791292232726029859e2);
 +    const __m128d Q1        = _mm_set1_pd(1.395105614657485689735e2);
 +    const __m128d Q0        = _mm_set1_pd(-4.918853881490881290097e1);
 +
 +    const __m128d R4        = _mm_set1_pd(2.967721961301243206100e-3);
 +    const __m128d R3        = _mm_set1_pd(-5.634242780008963776856e-1);
 +    const __m128d R2        = _mm_set1_pd(6.968710824104713396794e0);
 +    const __m128d R1        = _mm_set1_pd(-2.556901049652824852289e1);
 +    const __m128d R0        = _mm_set1_pd(2.853665548261061424989e1);
 +
 +    const __m128d S3        = _mm_set1_pd(-2.194779531642920639778e1);
 +    const __m128d S2        = _mm_set1_pd(1.470656354026814941758e2);
 +    const __m128d S1        = _mm_set1_pd(-3.838770957603691357202e2);
 +    const __m128d S0        = _mm_set1_pd(3.424398657913078477438e2);
 +
 +    __m128d sign;
 +    __m128d mask;
 +    __m128d xabs;
 +    __m128d zz,ww,z,q,w,y,zz2,ww2;
 +    __m128d PA,PB;
 +    __m128d QA,QB;
 +    __m128d RA,RB;
 +    __m128d SA,SB;
 +    __m128d nom,denom;
 +
 +    sign  = _mm_andnot_pd(signmask,x);
 +    xabs  = _mm_and_pd(x,signmask);
 +
 +    mask  = _mm_cmpgt_pd(xabs,limit1);
 +
 +    zz    = _mm_sub_pd(one,xabs);
 +    ww    = _mm_mul_pd(xabs,xabs);
 +    zz2   = _mm_mul_pd(zz,zz);
 +    ww2   = _mm_mul_pd(ww,ww);
 +
 +    /* R */
 +    RA    = _mm_macc_pd(R4,zz2,R2);
 +    RB    = _mm_macc_pd(R3,zz2,R1);
 +    RA    = _mm_macc_pd(RA,zz2,R0);
 +    RA    = _mm_macc_pd(RB,zz,RA);
 +
 +    /* S, SA = zz2 */
 +    SB    = _mm_macc_pd(S3,zz2,S1);
 +    SA    = _mm_add_pd(zz2,S2);
 +    SA    = _mm_macc_pd(SA,zz2,S0);
 +    SA    = _mm_macc_pd(SB,zz,SA);
 +
 +    /* P */
 +    PA    = _mm_macc_pd(P5,ww2,P3);
 +    PB    = _mm_macc_pd(P4,ww2,P2);
 +    PA    = _mm_macc_pd(PA,ww2,P1);
 +    PB    = _mm_macc_pd(PB,ww2,P0);
 +    PA    = _mm_macc_pd(PA,ww,PB);
 +
 +    /* Q, QA = ww2 */
 +    QB    = _mm_macc_pd(Q4,ww2,Q2);
 +    QA    = _mm_add_pd(ww2,Q3);
 +    QA    = _mm_macc_pd(QA,ww2,Q1);
 +    QB    = _mm_macc_pd(QB,ww2,Q0);
 +    QA    = _mm_macc_pd(QA,ww,QB);
 +
 +    RA    = _mm_mul_pd(RA,zz);
 +    PA    = _mm_mul_pd(PA,ww);
 +
 +    nom   = _mm_blendv_pd( PA,RA,mask );
 +    denom = _mm_blendv_pd( QA,SA,mask );
 +
 +    q     = _mm_mul_pd( nom, gmx_mm_inv_pd(denom) );
 +
 +    zz    = _mm_add_pd(zz,zz);
 +    zz    = gmx_mm_sqrt_pd(zz);
 +    z     = _mm_sub_pd(quarterpi,zz);
 +    zz    = _mm_mul_pd(zz,q);
 +    zz    = _mm_sub_pd(zz,morebits);
 +    z     = _mm_sub_pd(z,zz);
 +    z     = _mm_add_pd(z,quarterpi);
 +
 +    w     = _mm_macc_pd(xabs,q,xabs);
 +
 +    z     = _mm_blendv_pd( w,z,mask );
 +
 +    mask  = _mm_cmpgt_pd(xabs,limit2);
 +    z     = _mm_blendv_pd( xabs,z,mask );
 +
 +    z = _mm_xor_pd(z,sign);
 +
 +    return z;
 +}
 +
 +
 +static __m128d
 +gmx_mm_acos_pd(__m128d x)
 +{
 +    const __m128d signmask  = gmx_mm_castsi128_pd( _mm_set_epi32(0x7FFFFFFF,0xFFFFFFFF,0x7FFFFFFF,0xFFFFFFFF) );
 +    const __m128d one        = _mm_set1_pd(1.0);
 +    const __m128d half       = _mm_set1_pd(0.5);
 +    const __m128d pi         = _mm_set1_pd(M_PI);
 +    const __m128d quarterpi0 = _mm_set1_pd(7.85398163397448309616e-1);
 +    const __m128d quarterpi1 = _mm_set1_pd(6.123233995736765886130e-17);
 +
 +
 +    __m128d mask1;
 +
 +    __m128d z,z1,z2;
 +
 +    mask1 = _mm_cmpgt_pd(x,half);
 +    z1    = _mm_mul_pd(half,_mm_sub_pd(one,x));
 +    z1    = gmx_mm_sqrt_pd(z1);
 +    z     = _mm_blendv_pd( x,z1,mask1 );
 +
 +    z     = gmx_mm_asin_pd(z);
 +
 +    z1    = _mm_add_pd(z,z);
 +
 +    z2    = _mm_sub_pd(quarterpi0,z);
 +    z2    = _mm_add_pd(z2,quarterpi1);
 +    z2    = _mm_add_pd(z2,quarterpi0);
 +
 +    z     = _mm_blendv_pd(z2,z1,mask1);
 +
 +    return z;
 +}
 +
 +static __m128d
 +gmx_mm_atan_pd(__m128d x)
 +{
 +    /* Same algorithm as cephes library */
 +    const __m128d signmask  = gmx_mm_castsi128_pd( _mm_set_epi32(0x7FFFFFFF,0xFFFFFFFF,0x7FFFFFFF,0xFFFFFFFF) );
 +    const __m128d limit1    = _mm_set1_pd(0.66);
 +    const __m128d limit2    = _mm_set1_pd(2.41421356237309504880);
 +    const __m128d quarterpi = _mm_set1_pd(M_PI/4.0);
 +    const __m128d halfpi    = _mm_set1_pd(M_PI/2.0);
 +    const __m128d mone      = _mm_set1_pd(-1.0);
 +    const __m128d morebits1 = _mm_set1_pd(0.5*6.123233995736765886130E-17);
 +    const __m128d morebits2 = _mm_set1_pd(6.123233995736765886130E-17);
 +
 +    const __m128d P4        = _mm_set1_pd(-8.750608600031904122785E-1);
 +    const __m128d P3        = _mm_set1_pd(-1.615753718733365076637E1);
 +    const __m128d P2        = _mm_set1_pd(-7.500855792314704667340E1);
 +    const __m128d P1        = _mm_set1_pd(-1.228866684490136173410E2);
 +    const __m128d P0        = _mm_set1_pd(-6.485021904942025371773E1);
 +
 +    const __m128d Q4        = _mm_set1_pd(2.485846490142306297962E1);
 +    const __m128d Q3        = _mm_set1_pd(1.650270098316988542046E2);
 +    const __m128d Q2        = _mm_set1_pd(4.328810604912902668951E2);
 +    const __m128d Q1        = _mm_set1_pd(4.853903996359136964868E2);
 +    const __m128d Q0        = _mm_set1_pd(1.945506571482613964425E2);
 +
 +    __m128d sign;
 +    __m128d mask1,mask2;
 +    __m128d y,t1,t2;
 +    __m128d z,z2;
 +    __m128d P_A,P_B,Q_A,Q_B;
 +
 +    sign   = _mm_andnot_pd(signmask,x);
 +    x      = _mm_and_pd(x,signmask);
 +
 +    mask1  = _mm_cmpgt_pd(x,limit1);
 +    mask2  = _mm_cmpgt_pd(x,limit2);
 +
 +    t1     = _mm_mul_pd(_mm_add_pd(x,mone),gmx_mm_inv_pd(_mm_sub_pd(x,mone)));
 +    t2     = _mm_mul_pd(mone,gmx_mm_inv_pd(x));
 +
 +    y      = _mm_and_pd(mask1,quarterpi);
 +    y      = _mm_or_pd( _mm_and_pd(mask2,halfpi) , _mm_andnot_pd(mask2,y) );
 +
 +    x      = _mm_or_pd( _mm_and_pd(mask1,t1) , _mm_andnot_pd(mask1,x) );
 +    x      = _mm_or_pd( _mm_and_pd(mask2,t2) , _mm_andnot_pd(mask2,x) );
 +
 +    z      = _mm_mul_pd(x,x);
 +    z2     = _mm_mul_pd(z,z);
 +
 +    P_A    = _mm_macc_pd(P4,z2,P2);
 +    P_B    = _mm_macc_pd(P3,z2,P1);
 +    P_A    = _mm_macc_pd(P_A,z2,P0);
 +    P_A    = _mm_macc_pd(P_B,z,P_A);
 +
 +    /* Q_A = z2 */
 +    Q_B    = _mm_macc_pd(Q4,z2,Q2);
 +    Q_A    = _mm_add_pd(z2,Q3);
 +    Q_A    = _mm_macc_pd(Q_A,z2,Q1);
 +    Q_B    = _mm_macc_pd(Q_B,z2,Q0);
 +    Q_A    = _mm_macc_pd(Q_A,z,Q_B);
 +
 +    z      = _mm_mul_pd(z,P_A);
 +    z      = _mm_mul_pd(z,gmx_mm_inv_pd(Q_A));
 +    z      = _mm_macc_pd(z,x,x);
 +
 +    t1     = _mm_and_pd(mask1,morebits1);
 +    t1     = _mm_or_pd( _mm_and_pd(mask2,morebits2) , _mm_andnot_pd(mask2,t1) );
 +
 +    z      = _mm_add_pd(z,t1);
 +    y      = _mm_add_pd(y,z);
 +
 +    y      = _mm_xor_pd(y,sign);
 +
 +    return y;
 +}
 +
 +
 +static __m128d
 +gmx_mm_atan2_pd(__m128d y, __m128d x)
 +{
 +    const __m128d pi          = _mm_set1_pd(M_PI);
 +    const __m128d minuspi     = _mm_set1_pd(-M_PI);
 +    const __m128d halfpi      = _mm_set1_pd(M_PI/2.0);
 +    const __m128d minushalfpi = _mm_set1_pd(-M_PI/2.0);
 +
 +    __m128d z,z1,z3,z4;
 +    __m128d w;
 +    __m128d maskx_lt,maskx_eq;
 +    __m128d masky_lt,masky_eq;
 +    __m128d mask1,mask2,mask3,mask4,maskall;
 +
 +    maskx_lt  = _mm_cmplt_pd(x,_mm_setzero_pd());
 +    masky_lt  = _mm_cmplt_pd(y,_mm_setzero_pd());
 +    maskx_eq  = _mm_cmpeq_pd(x,_mm_setzero_pd());
 +    masky_eq  = _mm_cmpeq_pd(y,_mm_setzero_pd());
 +
 +    z         = _mm_mul_pd(y,gmx_mm_inv_pd(x));
 +    z         = gmx_mm_atan_pd(z);
 +
 +    mask1     = _mm_and_pd(maskx_eq,masky_lt);
 +    mask2     = _mm_andnot_pd(maskx_lt,masky_eq);
 +    mask3     = _mm_andnot_pd( _mm_or_pd(masky_lt,masky_eq) , maskx_eq);
 +    mask4     = _mm_and_pd(masky_eq,maskx_lt);
 +
 +    maskall   = _mm_or_pd( _mm_or_pd(mask1,mask2), _mm_or_pd(mask3,mask4) );
 +
 +    z         = _mm_andnot_pd(maskall,z);
 +    z1        = _mm_and_pd(mask1,minushalfpi);
 +    z3        = _mm_and_pd(mask3,halfpi);
 +    z4        = _mm_and_pd(mask4,pi);
 +
 +    z         = _mm_or_pd( _mm_or_pd(z,z1), _mm_or_pd(z3,z4) );
 +
 +    w         = _mm_blendv_pd(pi,minuspi,masky_lt);
 +    w         = _mm_and_pd(w,maskx_lt);
 +
 +    w         = _mm_andnot_pd(maskall,w);
 +
 +    z         = _mm_add_pd(z,w);
 +
 +    return z;
 +}
 +
 +#endif /*_gmx_math_x86_avx_128_fma_double_h_ */
index 13a02078b02ca95efa2b8540efaa82110d34adae,0000000000000000000000000000000000000000..60bfd71a8530337500b643757ced6fc997d56518
mode 100644,000000..100644
--- /dev/null
@@@ -1,197 -1,0 +1,214 @@@
 +/* -*- mode: c; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4; c-file-style: "stroustrup"; -*-
 + *
 + * 
 + * This file is part of GROMACS.
 + * Copyright (c) 2012-  
 + *
 + * Written by the Gromacs development team under coordination of
 + * David van der Spoel, Berk Hess, and Erik Lindahl.
 + *
 + * This library is free software; you can redistribute it and/or
 + * modify it under the terms of the GNU Lesser General Public License
 + * as published by the Free Software Foundation; either version 2
 + * of the License, or (at your option) any later version.
 + *
 + * To help us fund GROMACS development, we humbly ask that you cite
 + * the research papers on the package. Check out http://www.gromacs.org
 + * 
 + * And Hey:
 + * Gnomes, ROck Monsters And Chili Sauce
 + */
 +#ifndef _gmx_x86_avx_128_fma_h_
 +#define _gmx_x86_avx_128_fma_h_
 +
 +
 +#include <immintrin.h>
 +#ifdef HAVE_X86INTRIN_H
 +#include <x86intrin.h> /* FMA */
 +#endif
++#ifdef HAVE_INTRIN_H
++#include <intrin.h> /* FMA MSVC */
++#endif
++
 +
 +#include <stdio.h>
 +
 +#include "types/simple.h"
 +
 +
 +#define gmx_mm_extract_epi32(x, imm) _mm_cvtsi128_si32(_mm_srli_si128((x), 4 * (imm)))
 +
 +#define _GMX_MM_BLEND(b3,b2,b1,b0) (((b3) << 3) | ((b2) << 2) | ((b1) << 1) | ((b0)))
 +
 +#define _GMX_MM_PERMUTE128D(fp1,fp0)         (((fp1) << 1) | ((fp0)))
 +
 +
 +#define GMX_MM_TRANSPOSE2_PD(row0, row1) {           \
 +    __m128d __gmx_t1 = row0;                         \
 +    row0           = _mm_unpacklo_pd(row0,row1);     \
 +    row1           = _mm_unpackhi_pd(__gmx_t1,row1); \
 +}
 +
 +
 +#if (defined (_MSC_VER) || defined(__INTEL_COMPILER))
 +#  define gmx_mm_castsi128_ps(a) _mm_castsi128_ps(a)
 +#  define gmx_mm_castps_si128(a) _mm_castps_si128(a)
 +#  define gmx_mm_castps_ps128(a) (a)
 +#  define gmx_mm_castsi128_pd(a) _mm_castsi128_pd(a)
 +#  define gmx_mm_castpd_si128(a) _mm_castpd_si128(a)
 +#elif defined(__GNUC__)
 +#  define gmx_mm_castsi128_ps(a) ((__m128)(a))
 +#  define gmx_mm_castps_si128(a) ((__m128i)(a))
 +#  define gmx_mm_castps_ps128(a) ((__m128)(a))
 +#  define gmx_mm_castsi128_pd(a) ((__m128d)(a))
 +#  define gmx_mm_castpd_si128(a) ((__m128i)(a))
 +#else
 +static __m128  gmx_mm_castsi128_ps(__m128i a)
 +{
 +    return *(__m128 *) &a;
 +}
 +static __m128i gmx_mm_castps_si128(__m128 a)
 +{
 +    return *(__m128i *) &a;
 +}
 +static __m128  gmx_mm_castps_ps128(__m128 a)
 +{
 +    return *(__m128 *) &a;
 +}
 +static __m128d gmx_mm_castsi128_pd(__m128i a)
 +{
 +    return *(__m128d *) &a;
 +}
 +static __m128i gmx_mm_castpd_si128(__m128d a)
 +{
 +    return *(__m128i *) &a;
 +}
 +#endif
 +
 +#if GMX_EMULATE_AMD_FMA
 +/* Wrapper routines so we can do test builds on non-FMA or non-AMD hardware */
 +static __m128
 +_mm_macc_ps(__m128 a, __m128 b, __m128 c)
 +{
 +    return _mm_add_ps(c,_mm_mul_ps(a,b));
 +}
 +
 +static __m128
 +_mm_nmacc_ps(__m128 a, __m128 b, __m128 c)
 +{
 +    return _mm_sub_ps(c,_mm_mul_ps(a,b));
 +}
 +
 +static __m128
 +_mm_msub_ps(__m128 a, __m128 b, __m128 c)
 +{
 +    return _mm_sub_ps(_mm_mul_ps(a,b),c);
 +}
 +
 +static __m128d
 +_mm_macc_pd(__m128d a, __m128d b, __m128d c)
 +{
 +    return _mm_add_pd(c,_mm_mul_pd(a,b));
 +}
 +
 +static __m128d
 +_mm_nmacc_pd(__m128d a, __m128d b, __m128d c)
 +{
 +    return _mm_sub_pd(c,_mm_mul_pd(a,b));
 +}
 +
 +static __m128d
 +_mm_msub_pd(__m128d a, __m128d b, __m128d c)
 +{
 +    return _mm_sub_pd(_mm_mul_pd(a,b),c);
 +}
 +#endif /* AMD FMA emulation support */
 +
 +static void
 +gmx_mm_printxmm_ps(const char *s,__m128 xmm)
 +{
 +    float f[4];
 +
 +    _mm_storeu_ps(f,xmm);
 +    printf("%s: %15.10e %15.10e %15.10e %15.10e\n",s,f[0],f[1],f[2],f[3]);
 +}
 +
 +
 +static void
 +gmx_mm_printxmmsum_ps(const char *s,__m128 xmm)
 +{
 +    float f[4];
 +
 +    _mm_storeu_ps(f,xmm);
 +    printf("%s (sum): %15.10g\n",s,f[0]+f[1]+f[2]+f[3]);
 +}
 +
 +
 +static void
 +gmx_mm_printxmm_pd(const char *s,__m128d xmm)
 +{
 +    double f[2];
 +
 +    _mm_storeu_pd(f,xmm);
 +    printf("%s: %30.20e %30.20e\n",s,f[0],f[1]);
 +}
 +
 +static void
 +gmx_mm_printxmmsum_pd(const char *s,__m128d xmm)
 +{
 +    double f[2];
 +
 +    _mm_storeu_pd(f,xmm);
 +    printf("%s (sum): %15.10g\n",s,f[0]+f[1]);
 +}
 +
 +
 +static void
 +gmx_mm_printxmm_epi32(const char *s,__m128i xmmi)
 +{
 +    int i[4];
 +
 +    _mm_storeu_si128((__m128i *)i,xmmi);
 +    printf("%10s: %2d %2d %2d %2d\n",s,i[0],i[1],i[2],i[3]);
 +}
 +
 +
 +
 +static int gmx_mm_check_and_reset_overflow(void)
 +{
 +    int MXCSR;
 +    int sse_overflow;
 +
 +    MXCSR = _mm_getcsr();
 +    /* The overflow flag is bit 3 in the register */
 +    if (MXCSR & 0x0008)
 +    {
 +        sse_overflow = 1;
 +        /* Set the overflow flag to zero */
 +        MXCSR = MXCSR & 0xFFF7;
 +        _mm_setcsr(MXCSR);
 +    }
 +    else
 +    {
 +        sse_overflow = 0;
 +    }
 +
 +    return sse_overflow;
 +}
 +
++/* Work around gcc bug with wrong type for mask formal parameter to maskload/maskstore */
++#ifdef GMX_X86_AVX_GCC_MASKLOAD_BUG
++#    define gmx_mm_maskload_ps(mem,mask)       _mm_maskload_ps((mem),_mm_castsi128_ps(mask))
++#    define gmx_mm_maskstore_ps(mem,mask,x)    _mm_maskstore_ps((mem),_mm_castsi128_ps(mask),(x))
++#    define gmx_mm256_maskload_ps(mem,mask)    _mm256_maskload_ps((mem),_mm256_castsi256_ps(mask))
++#    define gmx_mm256_maskstore_ps(mem,mask,x) _mm256_maskstore_ps((mem),_mm256_castsi256_ps(mask),(x))
++#else
++#    define gmx_mm_maskload_ps(mem,mask)       _mm_maskload_ps((mem),(mask))
++#    define gmx_mm_maskstore_ps(mem,mask,x)    _mm_maskstore_ps((mem),(mask),(x))
++#    define gmx_mm256_maskload_ps(mem,mask)    _mm256_maskload_ps((mem),(mask))
++#    define gmx_mm256_maskstore_ps(mem,mask,x) _mm256_maskstore_ps((mem),(mask),(x))
++#endif
++
 +
 +
 +#endif /* _gmx_x86_avx_128_fma_h_ */
index ed8ec05d1005ed4d4beb771cd86ab1aa9a630e04,0000000000000000000000000000000000000000..360c941f6e734c62f18bea1bdd9b35d97a332f2b
mode 100644,000000..100644
--- /dev/null
@@@ -1,277 -1,0 +1,289 @@@
 +/* -*- mode: c; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4; c-file-style: "stroustrup"; -*-
 + *
 + * 
 + * This file is part of GROMACS.
 + * Copyright (c) 2012-  
 + *
 + * Written by the Gromacs development team under coordination of
 + * David van der Spoel, Berk Hess, and Erik Lindahl.
 + *
 + * This library is free software; you can redistribute it and/or
 + * modify it under the terms of the GNU Lesser General Public License
 + * as published by the Free Software Foundation; either version 2
 + * of the License, or (at your option) any later version.
 + *
 + * To help us fund GROMACS development, we humbly ask that you cite
 + * the research papers on the package. Check out http://www.gromacs.org
 + * 
 + * And Hey:
 + * Gnomes, ROck Monsters And Chili Sauce
 + */
 +#ifndef _gmx_x86_avx_256_h_
 +#define _gmx_x86_avx_256_h_
 +
 +
 +#include <immintrin.h>
 +#ifdef HAVE_X86INTRIN_H
 +#include <x86intrin.h> /* FMA */
 +#endif
 +
 +
 +#include <stdio.h>
 +
 +#include "types/simple.h"
 +
 +
 +#define gmx_mm_extract_epi32(x, imm) _mm_cvtsi128_si32(_mm_srli_si128((x), 4 * (imm)))
 +
 +#define _GMX_MM_BLEND256D(b3,b2,b1,b0) (((b3) << 3) | ((b2) << 2) | ((b1) << 1) | ((b0)))
 +#define _GMX_MM_PERMUTE(fp3,fp2,fp1,fp0) (((fp3) << 6) | ((fp2) << 4) | ((fp1) << 2) | ((fp0)))
 +#define _GMX_MM_PERMUTE256D(fp3,fp2,fp1,fp0) (((fp3) << 3) | ((fp2) << 2) | ((fp1) << 1) | ((fp0)))
 +#define _GMX_MM_PERMUTE128D(fp1,fp0)         (((fp1) << 1) | ((fp0)))
 +
 +
 +#define GMX_MM_TRANSPOSE2_PD(row0, row1) {           \
 +    __m128d __gmx_t1 = row0;                         \
 +    row0           = _mm_unpacklo_pd(row0,row1);     \
 +    row1           = _mm_unpackhi_pd(__gmx_t1,row1); \
 +}
 +
 +#define GMX_MM256_FULLTRANSPOSE4_PD(row0,row1,row2,row3) \
 +{                                                        \
 +    __m256d _t0, _t1, _t2, _t3;                          \
 +    _t0  = _mm256_unpacklo_pd((row0), (row1));           \
 +    _t1  = _mm256_unpackhi_pd((row0), (row1));           \
 +    _t2  = _mm256_unpacklo_pd((row2), (row3));           \
 +    _t3  = _mm256_unpackhi_pd((row2), (row3));           \
 +    row0 = _mm256_permute2f128_pd(_t0, _t2, 0x20);       \
 +    row1 = _mm256_permute2f128_pd(_t1, _t3, 0x20);       \
 +    row2 = _mm256_permute2f128_pd(_t0, _t2, 0x31);       \
 +    row3 = _mm256_permute2f128_pd(_t1, _t3, 0x31);       \
 +}
 +
 +#if (defined (_MSC_VER) || defined(__INTEL_COMPILER))
 +#  define gmx_mm_castsi128_ps(a) _mm_castsi128_ps(a)
 +#  define gmx_mm_castps_si128(a) _mm_castps_si128(a)
 +#  define gmx_mm_castps_ps128(a) (a)
 +#  define gmx_mm_castsi128_pd(a) _mm_castsi128_pd(a)
 +#  define gmx_mm_castpd_si128(a) _mm_castpd_si128(a)
 +#elif defined(__GNUC__)
 +#  define gmx_mm_castsi128_ps(a) ((__m128)(a))
 +#  define gmx_mm_castps_si128(a) ((__m128i)(a))
 +#  define gmx_mm_castps_ps128(a) ((__m128)(a))
 +#  define gmx_mm_castsi128_pd(a) ((__m128d)(a))
 +#  define gmx_mm_castpd_si128(a) ((__m128i)(a))
 +#else
 +static __m128  gmx_mm_castsi128_ps(__m128i a)
 +{
 +    return *(__m128 *) &a;
 +}
 +static __m128i gmx_mm_castps_si128(__m128 a)
 +{
 +    return *(__m128i *) &a;
 +}
 +static __m128  gmx_mm_castps_ps128(__m128 a)
 +{
 +    return *(__m128 *) &a;
 +}
 +static __m128d gmx_mm_castsi128_pd(__m128i a)
 +{
 +    return *(__m128d *) &a;
 +}
 +static __m128i gmx_mm_castpd_si128(__m128d a)
 +{
 +    return *(__m128i *) &a;
 +}
 +#endif
 +
 +static gmx_inline __m256
 +gmx_mm256_unpack128lo_ps(__m256 xmm1, __m256 xmm2)
 +{
 +    return _mm256_permute2f128_ps(xmm1,xmm2,0x20);
 +}
 +
 +static gmx_inline __m256
 +gmx_mm256_unpack128hi_ps(__m256 xmm1, __m256 xmm2)
 +{
 +    return _mm256_permute2f128_ps(xmm1,xmm2,0x31);
 +}
 +
 +static gmx_inline __m256
 +gmx_mm256_set_m128(__m128 hi, __m128 lo)
 +{
 +    return _mm256_insertf128_ps(_mm256_castps128_ps256(lo), hi, 0x1);
 +}
 +
 +
 +static gmx_inline __m256
 +gmx_mm256_load4_ps(float const * p)
 +{
 +    __m128 a;
 +
 +    a = _mm_load_ps(p);
 +    return _mm256_insertf128_ps(_mm256_castps128_ps256(a), a, 0x1);
 +}
 +
 +
 +static __m256d
 +gmx_mm256_unpack128lo_pd(__m256d xmm1, __m256d xmm2)
 +{
 +    return _mm256_permute2f128_pd(xmm1,xmm2,0x20);
 +}
 +
 +static __m256d
 +gmx_mm256_unpack128hi_pd(__m256d xmm1, __m256d xmm2)
 +{
 +    return _mm256_permute2f128_pd(xmm1,xmm2,0x31);
 +}
 +
 +static __m256d
 +gmx_mm256_set_m128d(__m128d hi, __m128d lo)
 +{
 +    return _mm256_insertf128_pd(_mm256_castpd128_pd256(lo), hi, 0x1);
 +}
 +
 +
 +static __m128 gmx_mm256_sum4h_m128(__m256 x, __m256 y)
 +{
 +    __m256 sum;
 +
 +    sum = _mm256_add_ps(x,y);
 +    return _mm_add_ps(_mm256_castps256_ps128(sum),_mm256_extractf128_ps(sum,0x1));
 +}
 +
 +
 +static void
 +gmx_mm_printxmm_ps(const char *s,__m128 xmm)
 +{
 +    float f[4];
 +
 +    _mm_storeu_ps(f,xmm);
 +    printf("%s: %15.10e %15.10e %15.10e %15.10e\n",s,f[0],f[1],f[2],f[3]);
 +}
 +
 +
 +static void
 +gmx_mm_printxmmsum_ps(const char *s,__m128 xmm)
 +{
 +    float f[4];
 +
 +    _mm_storeu_ps(f,xmm);
 +    printf("%s (sum): %15.10g\n",s,f[0]+f[1]+f[2]+f[3]);
 +}
 +
 +
 +static void
 +gmx_mm_printxmm_pd(const char *s,__m128d xmm)
 +{
 +    double f[2];
 +
 +    _mm_storeu_pd(f,xmm);
 +    printf("%s: %30.20e %30.20e\n",s,f[0],f[1]);
 +}
 +
 +static void
 +gmx_mm_printxmmsum_pd(const char *s,__m128d xmm)
 +{
 +    double f[2];
 +
 +    _mm_storeu_pd(f,xmm);
 +    printf("%s (sum): %15.10g\n",s,f[0]+f[1]);
 +}
 +
 +
 +static void
 +gmx_mm_printxmm_epi32(const char *s,__m128i xmmi)
 +{
 +    int i[4];
 +
 +    _mm_storeu_si128((__m128i *)i,xmmi);
 +    printf("%10s: %2d %2d %2d %2d\n",s,i[0],i[1],i[2],i[3]);
 +}
 +
 +static void
 +gmx_mm256_printymm_ps(const char *s,__m256 ymm)
 +{
 +    float f[8];
 +
 +    _mm256_storeu_ps(f,ymm);
 +    printf("%s: %12.7f %12.7f %12.7f %12.7f %12.7f %12.7f %12.7f %12.7f\n",s,f[0],f[1],f[2],f[3],f[4],f[5],f[6],f[7]);
 +}
 +
 +static void
 +gmx_mm256_printymmsum_ps(const char *s,__m256 ymm)
 +{
 +    float f[8];
 +
 +    _mm256_storeu_ps(f,ymm);
 +    printf("%s (sum): %15.10g\n",s,f[0]+f[1]+f[2]+f[3]+f[4]+f[5]+f[6]+f[7]);
 +}
 +
 +
 +static void
 +gmx_mm256_printymm_pd(const char *s,__m256d ymm)
 +{
 +    double f[4];
 +
 +    _mm256_storeu_pd(f,ymm);
 +    printf("%s: %16.12f %16.12f %16.12f %16.12f\n",s,f[0],f[1],f[2],f[3]);
 +}
 +
 +static void
 +gmx_mm256_printymmsum_pd(const char *s,__m256d ymm)
 +{
 +    double f[4];
 +
 +    _mm256_storeu_pd(f,ymm);
 +    printf("%s (sum): %15.10g\n",s,f[0]+f[1]+f[2]+f[3]);
 +}
 +
 +
 +
 +static void
 +gmx_mm256_printymm_epi32(const char *s,__m256i ymmi)
 +{
 +    int i[8];
 +
 +    _mm256_storeu_si256((__m256i *)i,ymmi);
 +    printf("%10s: %2d %2d %2d %2d %2d %2d %2d %2d\n",s,i[0],i[1],i[2],i[3],i[4],i[5],i[6],i[7]);
 +}
 +
 +
 +
 +static int gmx_mm_check_and_reset_overflow(void)
 +{
 +    int MXCSR;
 +    int sse_overflow;
 +
 +    MXCSR = _mm_getcsr();
 +    /* The overflow flag is bit 3 in the register */
 +    if (MXCSR & 0x0008)
 +    {
 +        sse_overflow = 1;
 +        /* Set the overflow flag to zero */
 +        MXCSR = MXCSR & 0xFFF7;
 +        _mm_setcsr(MXCSR);
 +    }
 +    else
 +    {
 +        sse_overflow = 0;
 +    }
 +
 +    return sse_overflow;
 +}
 +
++/* Work around gcc bug with wrong type for mask formal parameter to maskload/maskstore */
++#ifdef GMX_X86_AVX_GCC_MASKLOAD_BUG
++#    define gmx_mm_maskload_ps(mem,mask)       _mm_maskload_ps((mem),_mm_castsi128_ps(mask))
++#    define gmx_mm_maskstore_ps(mem,mask,x)    _mm_maskstore_ps((mem),_mm_castsi128_ps(mask),(x))
++#    define gmx_mm256_maskload_ps(mem,mask)    _mm256_maskload_ps((mem),_mm256_castsi256_ps(mask))
++#    define gmx_mm256_maskstore_ps(mem,mask,x) _mm256_maskstore_ps((mem),_mm256_castsi256_ps(mask),(x))
++#else
++#    define gmx_mm_maskload_ps(mem,mask)       _mm_maskload_ps((mem),(mask))
++#    define gmx_mm_maskstore_ps(mem,mask,x)    _mm_maskstore_ps((mem),(mask),(x))
++#    define gmx_mm256_maskload_ps(mem,mask)    _mm256_maskload_ps((mem),(mask))
++#    define gmx_mm256_maskstore_ps(mem,mask,x) _mm256_maskstore_ps((mem),(mask),(x))
++#endif
 +
 +
 +#endif /* _gmx_x86_avx_256_h_ */
index c6a08cc1d90fe92a1b503f788c11e4491fb0db5a,0000000000000000000000000000000000000000..8b9ed28563c02c7a8febea5cb18d41738c221ef0
mode 100644,000000..100644
--- /dev/null
@@@ -1,98 -1,0 +1,97 @@@
 +/*
 + * 
 + *                This source code is part of
 + * 
 + *                 G   R   O   M   A   C   S
 + * 
 + *          GROningen MAchine for Chemical Simulations
 + * 
 + *                        VERSION 3.2.0
 + * Written by David van der Spoel, Erik Lindahl, Berk Hess, and others.
 + * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 + * Copyright (c) 2001-2004, The GROMACS development team,
 + * check out http://www.gromacs.org for more information.
 +
 + * This program is free software; you can redistribute it and/or
 + * modify it under the terms of the GNU General Public License
 + * as published by the Free Software Foundation; either version 2
 + * of the License, or (at your option) any later version.
 + * 
 + * If you want to redistribute modifications, please consider that
 + * scientific software is very special. Version control is crucial -
 + * bugs must be traceable. We will be happy to consider code for
 + * inclusion in the official distribution, but derived work must not
 + * be called official GROMACS. Details are found in the README & COPYING
 + * files - if they are missing, get the official version at www.gromacs.org.
 + * 
 + * To help us fund GROMACS development, we humbly ask that you cite
 + * the papers on the package - you can find them in the top README file.
 + * 
 + * For more info, check our website at http://www.gromacs.org
 + * 
 + * And Hey:
 + * Gromacs Runs On Most of All Computer Systems
 + */
 +
 +#ifndef _main_h
 +#define _main_h
 +
 +
 +#include <stdio.h>
 +#include "network.h"
 +
 +#ifdef __cplusplus
 +extern "C" {
 +#endif
 +
 +char *gmx_gethostname(char *name, size_t len);
 +/* Sets the hostname to the value given by gethostname, if available,
 + * and to "unknown" otherwise. name should have at least size len.
 + * Returns name.
 + */
 +
 +void gmx_log_open(const char *fn,const t_commrec *cr,
 +                          gmx_bool bMasterOnly, gmx_bool bAppendFiles, FILE**);
 +/* Open the log file, if necessary (nprocs > 1) the logfile name is
 + * communicated around the ring.
 + */
 +
 +void gmx_log_close(FILE *fp);
 +/* Close the log file */
 +
 +void check_multi_int(FILE *log,const gmx_multisim_t *ms,
 +                          int val,const char *name);
 +void check_multi_large_int(FILE *log,const gmx_multisim_t *ms,
 +                           gmx_large_int_t val,const char *name);
 +/* Check if val is the same on all processors for a mdrun -multi run
 + * The string name is used to print to the log file and in a fatal error
 + * if the val's don't match.
 + */
 +void init_multisystem(t_commrec *cr, int nsim, char **multidirs,
 +                      int nfile, const t_filenm fnm[], gmx_bool bParFn);
 +/* Splits the communication into nsim separate simulations
 + * and creates a communication structure between the master
 + * these simulations.
 + * If bParFn is set, the nodeid is appended to the tpx and each output file.
 + */
 +
 +t_commrec *init_par(int *argc,char ***argv_ptr);
 +/* Initiate the parallel computer. Return the communication record
 + * (see network.h). The command line arguments are communicated so that they can be
 + * parsed on each processor.
 + * Arguments are the number of command line arguments, and a pointer to the
 + * array of argument strings. Both are allowed to be NULL.
 + */
 +
 +t_commrec *init_par_threads(const t_commrec *cro);
 +/* Initialize communication records for thread-parallel simulations. 
 +   Must be called on all threads before any communication takes place by 
 +   the individual threads. Copies the original commrec to 
 +   thread-local versions (a small memory leak results because we don't 
 +   deallocate the old shared version).  */
 +
 +#ifdef __cplusplus
 +}
 +#endif
 +
 +#endif        /* _main_h */
index 0c9ed96b3ede79d75c5a860281539feab937cff8,0000000000000000000000000000000000000000..321ed27373db13a8438ab865be853cd19100f896
mode 100644,000000..100644
--- /dev/null
@@@ -1,86 -1,0 +1,88 @@@
 +/*
 + * 
 + *                This source code is part of
 + * 
 + *                 G   R   O   M   A   C   S
 + * 
 + *          GROningen MAchine for Chemical Simulations
 + * 
 + *                        VERSION 3.2.0
 + * Written by David van der Spoel, Erik Lindahl, Berk Hess, and others.
 + * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 + * Copyright (c) 2001-2004, The GROMACS development team,
 + * check out http://www.gromacs.org for more information.
 +
 + * This program is free software; you can redistribute it and/or
 + * modify it under the terms of the GNU General Public License
 + * as published by the Free Software Foundation; either version 2
 + * of the License, or (at your option) any later version.
 + * 
 + * If you want to redistribute modifications, please consider that
 + * scientific software is very special. Version control is crucial -
 + * bugs must be traceable. We will be happy to consider code for
 + * inclusion in the official distribution, but derived work must not
 + * be called official GROMACS. Details are found in the README & COPYING
 + * files - if they are missing, get the official version at www.gromacs.org.
 + * 
 + * To help us fund GROMACS development, we humbly ask that you cite
 + * the papers on the package - you can find them in the top README file.
 + * 
 + * For more info, check our website at http://www.gromacs.org
 + * 
 + * And Hey:
 + * Gromacs Runs On Most of All Computer Systems
 + */
 +
 +#ifndef _mshift_h
 +#define _mshift_h
 +
 +#include "typedefs.h"
 +
 +#ifdef __cplusplus
 +extern "C" {
 +#endif
 +
 +t_graph *mk_graph(FILE *fplog,
 +                       t_idef *idef,int at_start,int at_end,
 +                       gmx_bool bShakeOnly,gmx_bool bSettle);
 +/* Build a graph from an idef description. The graph can be used
 + * to generate mol-shift indices.
++ * at_start and at_end should coincide will molecule boundaries,
++ * for the whole system this is simply 0 and natoms.
 + * If bShakeOnly, only the connections in the shake list are used.
 + * If bSettle && bShakeOnly the settles are used too.
 + */
 +
 +void mk_graph_ilist(FILE *fplog,
 +                         t_ilist *ilist,int at_start,int at_end,
 +                         gmx_bool bShakeOnly,gmx_bool bSettle,
 +                         t_graph *g);
 +/* As mk_graph, but takes t_ilist iso t_idef and does not allocate g */
 +
 +
 +void done_graph(t_graph *g);
 +/* Free the memory in g */
 + 
 +void p_graph(FILE *log,const char *title,t_graph *g);
 +/* Print a graph to log */
 +
 +void mk_mshift(FILE *log,t_graph *g,int ePBC,matrix box,rvec x[]);
 +/* Calculate the mshift codes, based on the connection graph in g. */
 +
 +void shift_x(t_graph *g,matrix box,rvec x[],rvec x_s[]);
 +/* Add the shift vector to x, and store in x_s (may be same array as x) */
 +
 +void shift_self(t_graph *g,matrix box,rvec x[]);
 +/* Id. but in place */
 +
 +void unshift_x(t_graph *g,matrix box,rvec x[],rvec x_s[]);
 +/* Subtract the shift vector from x_s, and store in x (may be same array) */
 +
 +void unshift_self(t_graph *g,matrix box,rvec x[]);
 +/* Id, but in place */
 +
 +#ifdef __cplusplus
 +}
 +#endif
 +
 +#endif        /* _mshift_h */
index 221060f3b9463299382c27fb9a09b1bb839e9161,0000000000000000000000000000000000000000..45f2ab5961582580c05f3cd93d97315a2ab6712a
mode 100644,000000..100644
--- /dev/null
@@@ -1,93 -1,0 +1,93 @@@
-   efEDI, efEDO, 
 +/*
 + * 
 + *                This source code is part of
 + * 
 + *                 G   R   O   M   A   C   S
 + * 
 + *          GROningen MAchine for Chemical Simulations
 + * 
 + *                        VERSION 3.2.0
 + * Written by David van der Spoel, Erik Lindahl, Berk Hess, and others.
 + * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 + * Copyright (c) 2001-2004, The GROMACS development team,
 + * check out http://www.gromacs.org for more information.
 +
 + * This program is free software; you can redistribute it and/or
 + * modify it under the terms of the GNU General Public License
 + * as published by the Free Software Foundation; either version 2
 + * of the License, or (at your option) any later version.
 + * 
 + * If you want to redistribute modifications, please consider that
 + * scientific software is very special. Version control is crucial -
 + * bugs must be traceable. We will be happy to consider code for
 + * inclusion in the official distribution, but derived work must not
 + * be called official GROMACS. Details are found in the README & COPYING
 + * files - if they are missing, get the official version at www.gromacs.org.
 + * 
 + * To help us fund GROMACS development, we humbly ask that you cite
 + * the papers on the package - you can find them in the top README file.
 + * 
 + * For more info, check our website at http://www.gromacs.org
 + * 
 + * And Hey:
 + * GRoups of Organic Molecules in ACtion for Science
 + */
 +
 +#ifdef __cplusplus
 +extern "C" {
 +#endif
 +
 + 
 +/* this enum should correspond to the array deffile in gmxlib/filenm.c */
 +enum {
 +  efMDP, efGCT,
 +  efTRX, efTRO, efTRN, efTRR, efTRJ, efXTC, efG87, 
 +  efEDR,
 +  efSTX, efSTO, efGRO, efG96, efPDB, efBRK, efENT, efESP, efPQR, efXYZ,
 +  efCPT,
 +  efLOG, efXVG, efOUT,
 +  efNDX, 
 +  efTOP, efITP,
 +  efTPX, efTPS, efTPR, efTPA, efTPB, 
 +  efTEX, efRTP, efATP, efHDB,
 +  efDAT, efDLG, 
 +  efMAP, efEPS, efMAT, efM2P,
 +  efMTX,
++  efEDI, 
 +  efHAT,
 +  efCUB,
 +  efXPM,
 +  efRND,
 +  efNR
 +};
 +
 +typedef struct {
 +  int  ftp;           /* File type (see enum above)           */
 +  const char *opt;    /* Command line option                  */
 +  const char *fn;       /* File name (as set in source code)  */
 +  unsigned long flag; /* Flag for all kinds of info (see defs)*/
 +  int  nfiles;                /* number of files                      */
 +  char **fns;         /* File names                           */
 +} t_filenm;
 +
 +#define ffSET 1<<0
 +#define ffREAD        1<<1
 +#define ffWRITE       1<<2
 +#define ffOPT 1<<3
 +#define ffLIB 1<<4
 +#define ffMULT        1<<5
 +#define ffRW  (ffREAD | ffWRITE)
 +#define ffOPTRD       (ffREAD | ffOPT)
 +#define ffOPTWR       (ffWRITE| ffOPT)
 +#define ffOPTRW       (ffRW   | ffOPT)
 +#define ffLIBRD       (ffREAD | ffLIB)
 +#define ffLIBOPTRD (ffOPTRD | ffLIB)
 +#define ffRDMULT   (ffREAD  | ffMULT)
 +#define ffOPTRDMULT   (ffRDMULT | ffOPT)
 +#define ffWRMULT   (ffWRITE  | ffMULT)
 +#define ffOPTWRMULT   (ffWRMULT | ffOPT)
 +
 +#ifdef __cplusplus
 +}
 +#endif
 +
index 24ecf8e2f1fa81c0e13f0a4949fdfc2dd32cdcc5,0000000000000000000000000000000000000000..5bcc17b72f4b0335779d69da443844c4157abb1c
mode 100644,000000..100644
--- /dev/null
@@@ -1,69 -1,0 +1,70 @@@
-   int      natoms;      /* Total range for this graph: 0 to natoms      */
 +/*
 + * 
 + *                This source code is part of
 + * 
 + *                 G   R   O   M   A   C   S
 + * 
 + *          GROningen MAchine for Chemical Simulations
 + * 
 + *                        VERSION 3.2.0
 + * Written by David van der Spoel, Erik Lindahl, Berk Hess, and others.
 + * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 + * Copyright (c) 2001-2004, The GROMACS development team,
 + * check out http://www.gromacs.org for more information.
 +
 + * This program is free software; you can redistribute it and/or
 + * modify it under the terms of the GNU General Public License
 + * as published by the Free Software Foundation; either version 2
 + * of the License, or (at your option) any later version.
 + * 
 + * If you want to redistribute modifications, please consider that
 + * scientific software is very special. Version control is crucial -
 + * bugs must be traceable. We will be happy to consider code for
 + * inclusion in the official distribution, but derived work must not
 + * be called official GROMACS. Details are found in the README & COPYING
 + * files - if they are missing, get the official version at www.gromacs.org.
 + * 
 + * To help us fund GROMACS development, we humbly ask that you cite
 + * the papers on the package - you can find them in the top README file.
 + * 
 + * For more info, check our website at http://www.gromacs.org
 + * 
 + * And Hey:
 + * GRoups of Organic Molecules in ACtion for Science
 + */
 +
 +#ifndef _types_graph_h
 +#define _types_graph_h
 +
 +#include "idef.h"
 +
 +#ifdef __cplusplus
 +extern "C" {
 +#endif
 +
 +
 +typedef enum { egcolWhite, egcolGrey, egcolBlack, egcolNR } egCol;
 +
 +typedef struct {
++  int      at0;     /* The first atom the graph was constructed for */
++  int      at1;     /* The last atom the graph was constructed for */
 +  int      nnodes;    /* The number of nodes, nnodes=at_end-at_start  */
 +  int      nbound;    /* The number of nodes with edges               */
 +  int      at_start;  /* The first connected atom in this graph       */
 +  int      at_end;    /* The last+1 connected atom in this graph      */
 +  int      *nedge;    /* For each node the number of edges            */
 +  atom_id  **edge;    /* For each node, the actual edges (bidirect.)  */
 +  gmx_bool     bScrewPBC;   /* Screw boundary conditions                    */
 +  ivec     *ishift;   /* Shift for each particle                      */
 +  int      negc;         
 +  egCol   *egc;         /* color of each node */
 +} t_graph;
 +
 +
 +#define SHIFT_IVEC(g,i) ((g)->ishift[i])
 +
 +#ifdef __cplusplus
 +}
 +#endif
 +
 +#endif /* _types_graph_h */
index f1029da4150692bd371eab620528bc3df8406d18,0000000000000000000000000000000000000000..1b447439dfa1397cef0cac83efafe150d9c24b4c
mode 100644,000000..100644
--- /dev/null
@@@ -1,151 -1,0 +1,129 @@@
- /* Comment out this define to use AVX-128 kernels with AVX-256 acceleration */
 +/* -*- mode: c; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4; c-file-style: "stroustrup"; -*-
 + *
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + *          GROningen MAchine for Chemical Simulations
 + *
 + * Written by David van der Spoel, Erik Lindahl, Berk Hess, and others.
 + * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 + * Copyright (c) 2001-2012, The GROMACS development team,
 + * check out http://www.gromacs.org for more information.
 + *
 + * This program is free software; you can redistribute it and/or
 + * modify it under the terms of the GNU General Public License
 + * as published by the Free Software Foundation; either version 2
 + * of the License, or (at your option) any later version.
 + *
 + * If you want to redistribute modifications, please consider that
 + * scientific software is very special. Version control is crucial -
 + * bugs must be traceable. We will be happy to consider code for
 + * inclusion in the official distribution, but derived work must not
 + * be called official GROMACS. Details are found in the README & COPYING
 + * files - if they are missing, get the official version at www.gromacs.org.
 + *
 + * To help us fund GROMACS development, we humbly ask that you cite
 + * the papers on the package - you can find them in the top README file.
 + *
 + * For more info, check our website at http://www.gromacs.org
 + *
 + * And Hey:
 + * Gallium Rubidium Oxygen Manganese Argon Carbon Silicon
 + */
 +
 +#ifndef NB_VERLET_H
 +#define NB_VERLET_H
 +
 +#include "nbnxn_pairlist.h"
 +#include "nbnxn_cuda_types_ext.h"
 +
 +#ifdef __cplusplus
 +extern "C" {
 +#endif
 +
 +#ifdef GMX_X86_SSE2
 +/* Use SIMD accelerated nbnxn search and kernels */
 +#define GMX_NBNXN_SIMD
 +
 +#ifdef GMX_X86_AVX_256
-  * Currently the 2xNN SIMD kernels only make sense and are only implemented
-  * with AVX-256 in single precision using a 4x4 cluster setup instead of 4x8.
++/* Note that setting this to 128 will also work with AVX-256, but slower */
 +#define GMX_NBNXN_SIMD_BITWIDTH  256
 +#else
 +#define GMX_NBNXN_SIMD_BITWIDTH  128
 +#endif
 +
 +/* The nbnxn SIMD 4xN and 2x(N+N) kernels can be added independently.
- /* Note that _mm_... intrinsics can be converted to either SSE or AVX
-  * depending on compiler flags.
-  * For gcc we check for __AVX__
-  * At least a check for icc should be added (if there is a macro)
-  */
- static const char *nbnxn_kernel_name[nbnxnkNR] =
-   { "not set", "plain C",
- #if !(defined GMX_X86_SSE2)
-     "not available", "not available",
- #else
- #if GMX_NBNXN_SIMD_BITWIDTH == 128
- #if !(defined GMX_X86_AVX_128_FMA || defined __AVX__)
- #ifndef GMX_X86_SSE4_1
-     "SSE2", "SSE2",
- #else
-     "SSE4.1", "SSE4.1",
- #endif
- #else
-     "AVX-128", "AVX-128",
- #endif
- #else
-     "AVX-256",  "AVX-256",
- #endif
- #endif
-     "CUDA", "plain C" };
++ * Currently the 2xNN SIMD kernels only make sense with:
++ *  8-way SIMD: 4x4 setup, works with AVX-256 in single precision
++ * 16-way SIMD: 4x8 setup, not used, but most of the kernel code is there
 + */
 +#define GMX_NBNXN_SIMD_4XN
 +#if GMX_NBNXN_SIMD_BITWIDTH == 256 && !defined GMX_DOUBLE
 +#define GMX_NBNXN_SIMD_2XNN
 +#endif
 +
 +#endif
 +
 +
 +/*! Nonbonded NxN kernel types: plain C, CPU SIMD, GPU CUDA, GPU emulation */
 +typedef enum
 +{
 +    nbnxnkNotSet = 0, 
 +    nbnxnk4x4_PlainC, 
 +    nbnxnk4xN_SIMD_4xN,
 +    nbnxnk4xN_SIMD_2xNN,
 +    nbnxnk8x8x8_CUDA,
 +    nbnxnk8x8x8_PlainC,
 +    nbnxnkNR
 +} nbnxn_kernel_type;
 +
++/*! Return a string indentifying the kernel type */
++const char *lookup_nbnxn_kernel_name(int kernel_type);
 +
 +enum { ewaldexclTable, ewaldexclAnalytical };
 +
 +/* Atom locality indicator: local, non-local, all, used for calls to:
 +   gridding, pair-search, force calculation, x/f buffer operations */
 +enum { eatLocal = 0, eatNonlocal = 1, eatAll  };
 +
 +#define LOCAL_A(x)               ((x) == eatLocal)
 +#define NONLOCAL_A(x)            ((x) == eatNonlocal)
 +#define LOCAL_OR_NONLOCAL_A(x)   (LOCAL_A(x) || NONLOCAL_A(x))
 +
 +/* Interaction locality indicator (used in pair-list search/calculations):
 +    - local interactions require local atom data and affect local output only;
 +    - non-local interactions require both local and non-local atom data and
 +      affect both local- and non-local output. */
 +enum { eintLocal = 0, eintNonlocal = 1 };
 +
 +#define LOCAL_I(x)               ((x) == eintLocal)
 +#define NONLOCAL_I(x)            ((x) == eintNonlocal)
 +
 +enum { enbvClearFNo, enbvClearFYes };
 +
 +typedef struct {
 +    nbnxn_pairlist_set_t nbl_lists;   /* pair list(s)                       */
 +    nbnxn_atomdata_t     *nbat;       /* atom data                          */
 +    int                  kernel_type; /* non-bonded kernel - see enum above */
 +    int                  ewald_excl;  /* Ewald exclusion - see enum above   */
 +} nonbonded_verlet_group_t;
 +
 +/* non-bonded data structure with Verlet-type cut-off */
 +typedef struct {
 +    nbnxn_search_t           nbs;   /* n vs n atom pair searching data       */
 +    int                      ngrp;  /* number of interaction groups          */
 +    nonbonded_verlet_group_t grp[2];/* local and non-local interaction group */
 +
 +    gmx_bool         bUseGPU;          /* TRUE when GPU acceleration is used */
 +    nbnxn_cuda_ptr_t cu_nbv;           /* pointer to CUDA nb verlet data     */
 +    int              min_ci_balanced;  /* pair list balancing parameter
 +                                          used for the 8x8x8 CUDA kernels    */
 +} nonbonded_verlet_t;
 +
 +#ifdef __cplusplus
 +}
 +#endif
 +
 +#endif /* NB_VERLET_H */
index ed9df8553a2e9e333245663cd5d21b50bb1c504f,0000000000000000000000000000000000000000..502d661e2fed6d8fc84e5064ccb1f408ff9b5af6
mode 100644,000000..100644
--- /dev/null
@@@ -1,235 -1,0 +1,245 @@@
-     int shift;          /* Shift vector index plus possible flags */
 +/* -*- mode: c; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4; c-file-style: "stroustrup"; -*-
 + *
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + *          GROningen MAchine for Chemical Simulations
 + *
 + * Written by David van der Spoel, Erik Lindahl, Berk Hess, and others.
 + * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 + * Copyright (c) 2001-2012, The GROMACS development team,
 + * check out http://www.gromacs.org for more information.
 + *
 + * This program is free software; you can redistribute it and/or
 + * modify it under the terms of the GNU General Public License
 + * as published by the Free Software Foundation; either version 2
 + * of the License, or (at your option) any later version.
 + *
 + * If you want to redistribute modifications, please consider that
 + * scientific software is very special. Version control is crucial -
 + * bugs must be traceable. We will be happy to consider code for
 + * inclusion in the official distribution, but derived work must not
 + * be called official GROMACS. Details are found in the README & COPYING
 + * files - if they are missing, get the official version at www.gromacs.org.
 + *
 + * To help us fund GROMACS development, we humbly ask that you cite
 + * the papers on the package - you can find them in the top README file.
 + *
 + * For more info, check our website at http://www.gromacs.org
 + *
 + * And Hey:
 + * Gallium Rubidium Oxygen Manganese Argon Carbon Silicon
 + */
 +
 +#ifndef _nbnxn_pairlist_h
 +#define _nbnxn_pairlist_h
 +
 +#ifdef __cplusplus
 +extern "C" {
 +#endif
 +
 +/* A buffer data structure of 64 bytes
 + * to be placed at the beginning and end of structs
 + * to avoid cache invalidation of the real contents
 + * of the struct by writes to neighboring memory.
 + */
 +typedef struct {
 +    int dummy[16];
 +} gmx_cache_protect_t;
 +
 +/* Abstract type for pair searching data */
 +typedef struct nbnxn_search * nbnxn_search_t;
 +
 +/* Function that should return a pointer *ptr to memory
 + * of size nbytes.
 + * Error handling should be done within this function.
 + */
 +typedef void nbnxn_alloc_t(void **ptr,size_t nbytes);
 +
 +/* Function that should free the memory pointed to by *ptr.
 + * NULL should not be passed to this function.
 + */
 +typedef void nbnxn_free_t(void *ptr);
 +
 +typedef struct {
 +    int      cj;    /* The j-cluster                    */
 +    unsigned excl;  /* The exclusion (interaction) bits */
 +} nbnxn_cj_t;
 +
++/* In nbnxn_ci_t the integer shift contains the shift in the lower 7 bits.
++ * The upper bits contain information for non-bonded kernel optimization.
++ * Simply calculating LJ and Coulomb for all pairs in a cluster pair is fine.
++ * But three flags can be used to skip interactions, currently only for subc=0
++ * !(shift & NBNXN_CI_DO_LJ(subc))   => we can skip LJ for all pairs
++ * shift & NBNXN_CI_HALF_LJ(subc)    => we can skip LJ for the second half of i
++ * !(shift & NBNXN_CI_DO_COUL(subc)) => we can skip Coulomb for all pairs
++ */
 +#define NBNXN_CI_SHIFT          127
 +#define NBNXN_CI_DO_LJ(subc)    (1<<(7+3*(subc)))
 +#define NBNXN_CI_HALF_LJ(subc)  (1<<(8+3*(subc)))
 +#define NBNXN_CI_DO_COUL(subc)  (1<<(9+3*(subc)))
 +
 +/* Simple pair-list i-unit */
 +typedef struct {
 +    int ci;             /* i-cluster             */
++    int shift;          /* Shift vector index plus possible flags, see above */
 +    int cj_ind_start;   /* Start index into cj   */
 +    int cj_ind_end;     /* End index into cj     */
 +} nbnxn_ci_t;
 +
 +/* Grouped pair-list i-unit */
 +typedef struct {
 +    int sci;            /* i-super-cluster       */
 +    int shift;          /* Shift vector index plus possible flags */
 +    int cj4_ind_start;  /* Start index into cj4  */
 +    int cj4_ind_end;    /* End index into cj4    */
 +} nbnxn_sci_t;
 +
 +typedef struct {
 +    unsigned imask;        /* The i-cluster interactions mask for 1 warp  */
 +    int excl_ind;          /* Index into the exclusion array for 1 warp   */
 +} nbnxn_im_ei_t;
 +
 +typedef struct {
 +    int cj[4];             /* The 4 j-clusters                            */
 +    nbnxn_im_ei_t imei[2]; /* The i-cluster mask data       for 2 warps   */
 +} nbnxn_cj4_t;
 +
 +typedef struct {
 +    unsigned pair[32];     /* Exclusion bits for one warp,                *
 +                            * each unsigned has bit for 4*8 i clusters    */
 +} nbnxn_excl_t;
 +
 +typedef struct {
 +    gmx_cache_protect_t cp0;
 +
 +    nbnxn_alloc_t *alloc;
 +    nbnxn_free_t  *free;
 +
 +    gmx_bool bSimple;      /* Simple list has na_sc=na_s and uses cj   *
 +                            * Complex list uses cj4                    */
 +
 +    int      na_ci;        /* The number of atoms per i-cluster        */
 +    int      na_cj;        /* The number of atoms per j-cluster        */
 +    int      na_sc;        /* The number of atoms per super cluster    */
 +    real     rlist;        /* The radius for constructing the list     */
 +    int      nci;          /* The number of i-clusters in the list     */
 +    nbnxn_ci_t *ci;        /* The i-cluster list, size nci             */
 +    int      ci_nalloc;    /* The allocation size of ci                */
 +    int      nsci;         /* The number of i-super-clusters in the list */
 +    nbnxn_sci_t *sci;      /* The i-super-cluster list                 */
 +    int      sci_nalloc;   /* The allocation size of sci               */
 +
 +    int      ncj;          /* The number of j-clusters in the list     */
 +    nbnxn_cj_t *cj;        /* The j-cluster list, size ncj             */
 +    int      cj_nalloc;    /* The allocation size of cj                */
 +
 +    int      ncj4;         /* The total number of 4*j clusters         */
 +    nbnxn_cj4_t *cj4;      /* The 4*j cluster list, size ncj4          */
 +    int      cj4_nalloc;   /* The allocation size of cj4               */
 +    int      nexcl;        /* The count for excl                       */
 +    nbnxn_excl_t *excl;    /* Atom interaction bits (non-exclusions)   */
 +    int      excl_nalloc;  /* The allocation size for excl             */
 +    int      nci_tot;      /* The total number of i clusters           */
 +
 +    struct nbnxn_list_work *work;
 +
 +    gmx_cache_protect_t cp1;
 +} nbnxn_pairlist_t;
 +
 +typedef struct {
 +    int          nnbl;      /* number of lists */
 +    nbnxn_pairlist_t **nbl; /* lists */
 +    gmx_bool     bCombined; /* TRUE if lists get combined into one (the 1st) */
 +    gmx_bool     bSimple;   /* TRUE if the list of of type "simple"
 +                               (na_sc=na_s, no super-clusters used) */
 +    int          natpair_ljq; /* Total number of atom pairs for LJ+Q kernel */
 +    int          natpair_lj;  /* Total number of atom pairs for LJ kernel   */
 +    int          natpair_q;   /* Total number of atom pairs for Q kernel    */
 +} nbnxn_pairlist_set_t;
 +
 +enum { nbatXYZ, nbatXYZQ, nbatX4, nbatX8 };
 +
 +typedef struct {
 +    real *f;      /* f, size natoms*fstride                             */
 +    real *fshift; /* Shift force array, size SHIFTS*DIM                 */
 +    int  nV;      /* The size of *Vvdw and *Vc                          */
 +    real *Vvdw;   /* Temporary Van der Waals group energy storage       */
 +    real *Vc;     /* Temporary Coulomb group energy storage             */
 +    int  nVS;     /* The size of *VSvdw and *VSc                        */
 +    real *VSvdw;  /* Temporary SIMD Van der Waals group energy storage  */
 +    real *VSc;    /* Temporary SIMD Coulomb group energy storage        */
 +} nbnxn_atomdata_output_t;
 +
 +/* Block size in atoms for the non-bonded thread force-buffer reduction,
 + * should be a multiple of all cell and x86 SIMD sizes (i.e. 2, 4 and 8).
 + * Should be small to reduce the reduction and zeroing cost,
 + * but too small will result in overhead.
 + * Currently the block size is NBNXN_BUFFERFLAG_SIZE*3*sizeof(real)=192 bytes.
 + */
 +#ifdef GMX_DOUBLE
 +#define NBNXN_BUFFERFLAG_SIZE   8
 +#else
 +#define NBNXN_BUFFERFLAG_SIZE  16
 +#endif
 +
 +/* We currently store the reduction flags as bits in an unsigned int.
 + * In most cases this limits the number of flags to 32.
 + * The reduction will automatically disable the flagging and do a full
 + * reduction when the flags won't fit, but this will lead to very slow
 + * reduction. As we anyhow don't expect reasonable performance with
 + * more than 32 threads, we put in this hard limit.
 + * You can increase this number, but the reduction will be very slow.
 + */
 +#define NBNXN_BUFFERFLAG_MAX_THREADS  32
 +
 +/* Flags for telling if threads write to force output buffers */
 +typedef struct {
 +    int nflag;       /* The number of flag blocks                         */
 +    unsigned *flag;  /* Bit i is set when thread i writes to a cell-block */
 +    int flag_nalloc; /* Allocation size of cxy_flag                       */
 +} nbnxn_buffer_flags_t;
 +
 +/* LJ combination rules: geometric, Lorentz-Berthelot, none */
 +enum { ljcrGEOM, ljcrLB, ljcrNONE, ljcrNR };
 +
 +typedef struct {
 +    nbnxn_alloc_t *alloc;
 +    nbnxn_free_t  *free;
 +    int  ntype;      /* The number of different atom types                 */
 +    real *nbfp;      /* Lennard-Jones 6*C6 and 12*C12 params, size ntype^2*2 */
 +    int  comb_rule;  /* Combination rule, see enum above                   */
 +    real *nbfp_comb; /* LJ parameter per atom type, size ntype*2           */
 +    real *nbfp_s4;   /* As nbfp, but with stride 4, size ntype^2*4         */
 +    int  natoms;     /* Number of atoms                                    */
 +    int  natoms_local;  /* Number of local atoms                           */
 +    int  *type;      /* Atom types                                         */
 +    real *lj_comb;   /* LJ parameters per atom for combining for pairs     */
 +    int  XFormat;    /* The format of x (and q), enum                      */
 +    int  FFormat;    /* The format of f, enum                              */
 +    real *q;         /* Charges, can be NULL if incorporated in x          */
 +    int  na_c;       /* The number of atoms per cluster                    */
 +    int  nenergrp;   /* The number of energy groups                        */
 +    int  neg_2log;   /* Log2 of nenergrp                                   */
 +    int  *energrp;   /* The energy groups per cluster, can be NULL         */
 +    gmx_bool bDynamicBox; /* Do we need to update shift_vec every step?    */
 +    rvec *shift_vec; /* Shift vectors, copied from t_forcerec              */
 +    int  xstride;    /* stride for a coordinate in x (usually 3 or 4)      */
 +    int  fstride;    /* stride for a coordinate in f (usually 3 or 4)      */
 +    real *x;         /* x and possibly q, size natoms*xstride              */
++    real *simd_4xn_diag;  /* indices to set the SIMD 4xN diagonal masks    */
++    real *simd_2xnn_diag; /* indices to set the SIMD 2x(N+N)diagonal masks */
 +    int  nout;       /* The number of force arrays                         */
 +    nbnxn_atomdata_output_t *out;  /* Output data structures               */
 +    int  nalloc;     /* Allocation size of all arrays (for x/f *x/fstride) */
 +    gmx_bool bUseBufferFlags; /* Use the flags or operate on all atoms     */
 +    nbnxn_buffer_flags_t buffer_flags; /* Flags for buffer zeroing+reduc.  */
 +} nbnxn_atomdata_t;
 +
 +#ifdef __cplusplus
 +}
 +#endif
 +
 +#endif
index b4256a1206358a7896607de233d16c2731587450,0000000000000000000000000000000000000000..b444598e847d205fddbf66f995d40fdad24813db
mode 100644,000000..100644
--- /dev/null
@@@ -1,231 -1,0 +1,254 @@@
 +/*
 + * 
 + *                This source code is part of
 + * 
 + *                 G   R   O   M   A   C   S
 + * 
 + *          GROningen MAchine for Chemical Simulations
 + * 
 + *                        VERSION 3.2.0
 + * Written by David van der Spoel, Erik Lindahl, Berk Hess, and others.
 + * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 + * Copyright (c) 2001-2004, The GROMACS development team,
 + * check out http://www.gromacs.org for more information.
 +
 + * This program is free software; you can redistribute it and/or
 + * modify it under the terms of the GNU General Public License
 + * as published by the Free Software Foundation; either version 2
 + * of the License, or (at your option) any later version.
 + * 
 + * If you want to redistribute modifications, please consider that
 + * scientific software is very special. Version control is crucial -
 + * bugs must be traceable. We will be happy to consider code for
 + * inclusion in the official distribution, but derived work must not
 + * be called official GROMACS. Details are found in the README & COPYING
 + * files - if they are missing, get the official version at www.gromacs.org.
 + * 
 + * To help us fund GROMACS development, we humbly ask that you cite
 + * the papers on the package - you can find them in the top README file.
 + * 
 + * For more info, check our website at http://www.gromacs.org
 + * 
 + * And Hey:
 + * GRoups of Organic Molecules in ACtion for Science
 + */
 +#ifndef _state_h_
 +#define _state_h_
 +
 +
 +#include "simple.h"
 +
 +#ifdef __cplusplus
 +extern "C" {
 +#endif
 +
 +/*
 + * The t_state struct should contain all the (possibly) non-static
 + * information required to define the state of the system.
 + * Currently the random seeds for SD and BD are missing.
 + */
 +
 +/* for now, define the length of the NH chains here */
 +#define NHCHAINLENGTH 10
 +#define MAXLAMBDAS 1024
 +
 +/* These enums are used in flags as (1<<est...).
 + * The order of these enums should not be changed,
 + * since that affects the checkpoint (.cpt) file format.
 + */
 +  enum { estLAMBDA,
 +         estBOX, estBOX_REL, estBOXV, estPRES_PREV, estNH_XI,  estTC_INT,
 +         estX,   estV,       estSDX,  estCGP,       estLD_RNG, estLD_RNGI, 
 +         estDISRE_INITF, estDISRE_RM3TAV,
 +         estORIRE_INITF, estORIRE_DTAV,
 +         estSVIR_PREV, estNH_VXI, estVETA, estVOL0, estNHPRES_XI, estNHPRES_VXI, estFVIR_PREV,
 +         estFEPSTATE, estMC_RNG, estMC_RNGI,
 +         estNR };
 +
 +#define EST_DISTR(e) (!(((e) >= estLAMBDA && (e) <= estTC_INT) || ((e) >= estSVIR_PREV && (e) <= estMC_RNGI)))
 +
 +/* The names of the state entries, defined in src/gmxlib/checkpoint.c */
 +extern const char *est_names[estNR];
 +
 +typedef struct
 +{
 +  real disre_initf;    /* The scaling factor for initializing the time av. */
 +  int  ndisrepairs;    /* The number of distance restraints                */
 +  real *disre_rm3tav;  /* The r^-3 time averaged pair distances            */
 +  real orire_initf;    /* The scaling factor for initializing the time av. */
 +  int  norire_Dtav;    /* The number of matrix element in dtav (npair*5)   */
 +  real *orire_Dtav;    /* The time averaged orientation tensors            */
 +} history_t;
 +
 +/* Struct used for checkpointing only.
 + * This struct would not be required with unlimited precision.
 + * But because of limited precision, the COM motion removal implementation
 + * can cause the kinetic energy in the MD loop to differ by a few bits from
 + * the kinetic energy one would determine from state.v.
 + */
 +typedef struct
 +{
 +  gmx_bool     bUpToDate;
 +  int      ekin_n;
 +  tensor  *ekinh;
 +  tensor  *ekinf;
 +  tensor  *ekinh_old;
 +  tensor   ekin_total;
 +  double  *ekinscalef_nhc;
 +  double  *ekinscaleh_nhc;
 +  double  *vscale_nhc;
 +  real     dekindl;
 +  real     mvcos;
 +} ekinstate_t;
 +
 +/* energy history for delta_h histograms */
 +typedef struct
 +{
 +    int nndh;           /* the number of energy difference lists */
 +    int  *ndh;          /* the number in each energy difference list */
 +    real **dh;          /* the energy difference lists */
 +
 +    double start_time;     /* the start time of these energy diff blocks */
 +    double start_lambda;   /* lambda at start time */
 +
 +    gmx_bool start_lambda_set; /* whether the lambda value is set. Here
 +                                  For backward-compatibility. */
 +} delta_h_history_t; 
 +
 +typedef struct
 +{
 +  int nlambda;               /* total number of lambda states - for history*/
 +
 +  gmx_bool bEquil;           /* reached equilibration */
 +  int  *n_at_lam;            /* number of points observed at each lambda */
 +  real *wl_histo;            /* histogram for WL flatness determination */
 +  real wl_delta;             /* current wang-landau delta */
 +
 +  real *sum_weights;         /* weights of the states */
 +  real *sum_dg;              /* free energies of the states -- not actually used for weighting, but informational */
 +  real *sum_minvar;          /* corrections to weights for minimum variance */
 +  real *sum_variance;        /* variances of the states */
 +
 +  real **accum_p;            /* accumulated bennett weights for n+1 */
 +  real **accum_m;            /* accumulated bennett weights for n-1 */
 +  real **accum_p2;           /* accumulated squared bennett weights for n+1 */
 +  real **accum_m2;           /* accumulated squared bennett weights for n-1 */
 +
 +  real **Tij;                /* transition matrix */
 +  real **Tij_empirical;      /* Empirical transition matrix */
 +} df_history_t;
 +
 +typedef struct
 +{
 +  gmx_large_int_t nsteps;       /* The number of steps in the history            */
 +  gmx_large_int_t nsum;         /* The nr. of steps in the ener_ave and ener_sum */
 +  double *   ener_ave;     /* Energy term history sum to get fluctuations   */
 +  double *   ener_sum;     /* Energy term history sum to get fluctuations   */
 +  int        nener;        /* Number of energy terms in two previous arrays */
 +  gmx_large_int_t nsteps_sim;   /* The number of steps in ener_sum_sim      */
 +  gmx_large_int_t nsum_sim;     /* The number of frames in ener_sum_sim     */
 +  double *   ener_sum_sim; /* Energy term history sum of the whole sim      */
 +
 +  delta_h_history_t *dht;  /* The BAR energy differences */
 +}
 +energyhistory_t;
 +
++typedef struct
++{
++    /* If one uses essential dynamics or flooding on a group of atoms from
++     * more than one molecule, we cannot make this group whole with
++     * do_pbc_first_mtop(). We assume that the ED group has the correct PBC
++     * representation at the beginning of the simulation and keep track
++     * of the shifts to always get it into that representation.
++     * For proper restarts from a checkpoint we store the positions of the
++     * reference group at the time of checkpoint writing */
++    gmx_bool    bFromCpt;       /* Did we start from a checkpoint file?       */
++    int         nED;            /* No. of ED/Flooding data sets, if <1 no ED  */
++    int         *nref;          /* No. of atoms in i'th reference structure   */
++    int         *nav;           /* Same for average structure                 */
++    rvec        **old_sref;     /* Positions of the reference atoms
++                                   at the last time step (with correct PBC
++                                   representation)                            */
++    rvec        **old_sref_p;   /* Pointer to these positions                 */
++    rvec        **old_sav;      /* Same for the average positions             */
++    rvec        **old_sav_p;
++}
++edsamstate_t;
++
 +typedef struct
 +{
 +  int           natoms;
 +  int           ngtc;
 +  int           nnhpres;
 +  int           nhchainlength; /* number of nose-hoover chains               */
 +  int           nrng;
 +  int           nrngi;
 +  int           flags;  /* Flags telling which entries are present      */
 +  int           fep_state; /* indicates which of the alchemical states we are in                 */
 +  real          *lambda; /* lambda vector                               */
 +  matrix      box;    /* box vector coordinates                       */
 +  matrix      box_rel; /* Relitaive box vectors to preserve shape     */
 +  matrix      boxv;   /* box velocitites for Parrinello-Rahman pcoupl */
 +  matrix        pres_prev; /* Pressure of the previous step for pcoupl  */
 +  matrix        svir_prev; /* Shake virial for previous step for pcoupl */
 +  matrix        fvir_prev; /* Force virial of the previous step for pcoupl  */
 +  double        *nosehoover_xi;  /* for Nose-Hoover tcoupl (ngtc)       */
 +  double        *nosehoover_vxi; /* for N-H tcoupl (ngtc)               */
 +  double        *nhpres_xi;  /* for Nose-Hoover pcoupl for barostat     */
 +  double        *nhpres_vxi; /* for Nose-Hoover pcoupl for barostat     */
 +  double        *therm_integral; /* for N-H/V-rescale tcoupl (ngtc)     */
 +  real          veta; /* trotter based isotropic P-coupling             */
 +  real          vol0; /* initial volume,required for computing NPT conserverd quantity */
 +  int           nalloc; /* Allocation size for x, v and sd_x when !=NULL*/
 +  rvec          *x;     /* the coordinates (natoms)                     */
 +  rvec          *v;     /* the velocities (natoms)                      */
 +  rvec          *sd_X;  /* random part of the x update for stoch. dyn.  */
 +  rvec          *cg_p;  /* p vector for conjugate gradient minimization */
 +
 +  unsigned int  *ld_rng;  /* RNG random state                           */
 +  int           *ld_rngi; /* RNG index                                  */
 +
 +  int           nmcrng;   /* number of RNG states                       */
 +  unsigned int  *mc_rng;  /* lambda MC RNG random state                 */
 +  int           *mc_rngi; /* lambda MC RNG index                        */
 +
 +  history_t     hist;   /* Time history for restraints                  */
 +
 +  ekinstate_t   ekinstate; /* The state of the kinetic energy data      */
 +
 +  energyhistory_t  enerhist; /* Energy history for statistics           */
 +  df_history_t  dfhist; /*Free energy history for free energy analysis  */
++  edsamstate_t  edsamstate;    /* Essential dynamics / flooding history */
 +
 +  int           ddp_count; /* The DD partitioning count for this state  */
 +  int           ddp_count_cg_gl; /* The DD part. count for index_gl     */
 +  int           ncg_gl; /* The number of local charge groups            */
 +  int           *cg_gl; /* The global cg number of the local cgs        */
 +  int           cg_gl_nalloc; /* Allocation size of cg_gl;              */
 +} t_state;
 +
 +typedef struct 
 +{ 
 +  double *Qinv;  /* inverse mass of thermostat -- computed from inputs, but a good place to store */
 +  double *QPinv; /* inverse mass of thermostat for barostat -- computed from inputs, but a good place to store */
 +  double Winv;   /* Pressure mass inverse -- computed, not input, but a good place to store. Need to make a matrix later */
 +  tensor Winvm;  /* inverse pressure mass tensor, computed       */       
 +} t_extmass;
 +
 +
 +typedef struct
 +{ 
 +  real veta;   
 +  double rscale;
 +  double vscale;
 +  double rvscale;
 +  double alpha;
 +  double *vscale_nhc;
 +} t_vetavars;
 +
 +#ifdef __cplusplus
 +}
 +#endif
 +
 +
 +#endif /* _state_h_ */
index 0660994ddc46eba9b80f301a130672d67b7925f0,0000000000000000000000000000000000000000..02f8bd41a9e2584723cf276ce71ab50b46a4032a
mode 100644,000000..100644
--- /dev/null
@@@ -1,909 -1,0 +1,875 @@@
- #ifdef GMX_POWERPC_SQRT
- static real gmx_powerpc_invsqrt(real x)
- {
-   const real  half=0.5;
-   const real  three=3.0;
-   t_convert   result,bit_pattern;
-   unsigned int exp,fract;
-   real        lu;
-   real        y;
- #ifdef GMX_DOUBLE
-   real        y2;
- #endif
-   lu = __frsqrte((double)x);
-   y=(half*lu*(three-((x*lu)*lu)));
- #if (GMX_POWERPC_SQRT==2)
-   /* Extra iteration required */
-   y=(half*y*(three-((x*y)*y)));
- #endif
- #ifdef GMX_DOUBLE
-   y2=(half*y*(three-((x*y)*y)));
-   return y2;                    /* 10 Flops */
- #else
-   return y;                     /* 5  Flops */
- #endif
- }
- #define gmx_invsqrt(x) gmx_powerpc_invsqrt(x)
- #define INVSQRT_DONE
- #endif /* powerpc_invsqrt */
 +/*
 + * 
 + *                This source code is part of
 + * 
 + *                 G   R   O   M   A   C   S
 + * 
 + *          GROningen MAchine for Chemical Simulations
 + * 
 + *                        VERSION 3.2.0
 + * Written by David van der Spoel, Erik Lindahl, Berk Hess, and others.
 + * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 + * Copyright (c) 2001-2004, The GROMACS development team,
 + * check out http://www.gromacs.org for more information.
 +
 + * This program is free software; you can redistribute it and/or
 + * modify it under the terms of the GNU General Public License
 + * as published by the Free Software Foundation; either version 2
 + * of the License, or (at your option) any later version.
 + * 
 + * If you want to redistribute modifications, please consider that
 + * scientific software is very special. Version control is crucial -
 + * bugs must be traceable. We will be happy to consider code for
 + * inclusion in the official distribution, but derived work must not
 + * be called official GROMACS. Details are found in the README & COPYING
 + * files - if they are missing, get the official version at www.gromacs.org.
 + * 
 + * To help us fund GROMACS development, we humbly ask that you cite
 + * the papers on the package - you can find them in the top README file.
 + * 
 + * For more info, check our website at http://www.gromacs.org
 + * 
 + * And Hey:
 + * Gromacs Runs On Most of All Computer Systems
 + */
 +#ifndef _vec_h
 +#define _vec_h
 +
 +/*
 +  collection of in-line ready operations:
 +  
 +  lookup-table optimized scalar operations:
 +  real gmx_invsqrt(real x)
 +  void vecinvsqrt(real in[],real out[],int n)
 +  void vecrecip(real in[],real out[],int n)
 +  real sqr(real x)
 +  double dsqr(double x)
 +  
 +  vector operations:
 +  void rvec_add(const rvec a,const rvec b,rvec c)  c = a + b
 +  void dvec_add(const dvec a,const dvec b,dvec c)  c = a + b
 +  void ivec_add(const ivec a,const ivec b,ivec c)  c = a + b
 +  void rvec_inc(rvec a,const rvec b)               a += b
 +  void dvec_inc(dvec a,const dvec b)               a += b
 +  void ivec_inc(ivec a,const ivec b)               a += b
 +  void rvec_sub(const rvec a,const rvec b,rvec c)  c = a - b
 +  void dvec_sub(const dvec a,const dvec b,dvec c)  c = a - b
 +  void rvec_dec(rvec a,rvec b)                     a -= b
 +  void copy_rvec(const rvec a,rvec b)              b = a (reals)
 +  void copy_dvec(const dvec a,dvec b)              b = a (reals)
 +  void copy_ivec(const ivec a,ivec b)              b = a (integers)
 +  void ivec_sub(const ivec a,const ivec b,ivec c)  c = a - b
 +  void svmul(real a,rvec v1,rvec v2)               v2 = a * v1
 +  void dsvmul(double a,dvec v1,dvec v2)            v2 = a * v1
 +  void clear_rvec(rvec a)                          a = 0
 +  void clear_dvec(dvec a)                          a = 0
 +  void clear_ivec(rvec a)                          a = 0
 +  void clear_rvecs(int n,rvec v[])
 +  real iprod(rvec a,rvec b)                        = a . b (inner product)
 +  double diprod(dvec a,dvec b)                     = a . b (inner product)
 +  real iiprod(ivec a,ivec b)                       = a . b (integers)
 +  real norm2(rvec a)                               = | a |^2 ( = x*y*z )
 +  double dnorm2(dvec a)                            = | a |^2 ( = x*y*z )
 +  real norm(rvec a)                                = | a |
 +  double dnorm(dvec a)                             = | a |
 +  void cprod(rvec a,rvec b,rvec c)                 c = a x b (cross product)
 +  void dprod(rvec a,rvec b,rvec c)                 c = a x b (cross product)
 +  void dprod(rvec a,rvec b,rvec c)                 c = a * b (direct product)
 +  real cos_angle(rvec a,rvec b)
 +  real cos_angle_no_table(rvec a,rvec b)
 +  real distance2(rvec v1, rvec v2)                 = | v2 - v1 |^2
 +  void unitv(rvec src,rvec dest)                   dest = src / |src|
 +  void unitv_no_table(rvec src,rvec dest)          dest = src / |src|
 +  
 +  matrix (3x3) operations:
 +    ! indicates that dest should not be the same as a, b or src
 +    the _ur0 varieties work on matrices that have only zeros
 +    in the upper right part, such as box matrices, these varieties
 +    could produce less rounding errors, not due to the operations themselves,
 +    but because the compiler can easier recombine the operations
 +  void copy_mat(matrix a,matrix b)                 b = a
 +  void clear_mat(matrix a)                       a = 0
 +  void mmul(matrix a,matrix b,matrix dest)    !  dest = a . b
 +  void mmul_ur0(matrix a,matrix b,matrix dest)     dest = a . b
 +  void transpose(matrix src,matrix dest)      !  dest = src*
 +  void tmmul(matrix a,matrix b,matrix dest)   !  dest = a* . b
 +  void mtmul(matrix a,matrix b,matrix dest)   !  dest = a . b*
 +  real det(matrix a)                             = det(a)
 +  void m_add(matrix a,matrix b,matrix dest)      dest = a + b
 +  void m_sub(matrix a,matrix b,matrix dest)      dest = a - b
 +  void msmul(matrix m1,real r1,matrix dest)      dest = r1 * m1
 +  void m_inv_ur0(matrix src,matrix dest)           dest = src^-1
 +  void m_inv(matrix src,matrix dest)          !  dest = src^-1
 +  void mvmul(matrix a,rvec src,rvec dest)     !  dest = a . src
 +  void mvmul_ur0(matrix a,rvec src,rvec dest)      dest = a . src
 +  void tmvmul_ur0(matrix a,rvec src,rvec dest)     dest = a* . src
 +  real trace(matrix m)                             = trace(m)
 +*/
 +
 +#include "types/simple.h"
 +#include "maths.h"
 +#include "typedefs.h"
 +#include "sysstuff.h"
 +#include "gmx_fatal.h"
 +#include "physics.h"
 +
 +#ifdef __cplusplus
 +extern "C" {
 +#elif 0
 +} /* avoid screwing up indentation */
 +#endif
 +
 +
 +#define EXP_LSB         0x00800000
 +#define EXP_MASK        0x7f800000
 +#define EXP_SHIFT       23
 +#define FRACT_MASK      0x007fffff
 +#define FRACT_SIZE      11              /* significant part of fraction */
 +#define FRACT_SHIFT     (EXP_SHIFT-FRACT_SIZE)
 +#define EXP_ADDR(val)   (((val)&EXP_MASK)>>EXP_SHIFT)
 +#define FRACT_ADDR(val) (((val)&(FRACT_MASK|EXP_LSB))>>FRACT_SHIFT)
 +
 +#define PR_VEC(a)       a[XX],a[YY],a[ZZ]
 +
 +#ifdef GMX_SOFTWARE_INVSQRT
 +extern const unsigned int *  gmx_invsqrt_exptab;
 +extern const unsigned int *  gmx_invsqrt_fracttab;
 +#endif
 +
 +
 +typedef union 
 +{
 +  unsigned int bval;
 +  float fval;
 +} t_convert;
 +
 +
 +#ifdef GMX_SOFTWARE_INVSQRT
 +static real gmx_software_invsqrt(real x)
 +{
 +  const real  half=0.5;
 +  const real  three=3.0;
 +  t_convert   result,bit_pattern;
 +  unsigned int exp,fract;
 +  real        lu;
 +  real        y;
 +#ifdef GMX_DOUBLE
 +  real        y2;
 +#endif
 + 
 +  bit_pattern.fval=x;
 +  exp   = EXP_ADDR(bit_pattern.bval);
 +  fract = FRACT_ADDR(bit_pattern.bval);
 +  result.bval=gmx_invsqrt_exptab[exp] | gmx_invsqrt_fracttab[fract];
 +  lu    = result.fval;
 +  
 +  y=(half*lu*(three-((x*lu)*lu)));
 +#ifdef GMX_DOUBLE
 +  y2=(half*y*(three-((x*y)*y)));
 +  
 +  return y2;                    /* 10 Flops */
 +#else
 +  return y;                     /* 5  Flops */
 +#endif
 +}
 +#define gmx_invsqrt(x) gmx_software_invsqrt(x)
 +#define INVSQRT_DONE 
 +#endif /* gmx_invsqrt */
 +
 +#ifndef INVSQRT_DONE
 +#    ifdef GMX_DOUBLE
 +#        ifdef HAVE_RSQRT
 +#            define gmx_invsqrt(x)     rsqrt(x)
 +#        else
 +#            define gmx_invsqrt(x)     (1.0/sqrt(x))
 +#        endif
 +#    else /* single */
 +#        ifdef HAVE_RSQRTF
 +#            define gmx_invsqrt(x)     rsqrtf(x)
 +#        elif defined HAVE_RSQRT
 +#            define gmx_invsqrt(x)     rsqrt(x)
 +#        elif defined HAVE_SQRTF
 +#            define gmx_invsqrt(x)     (1.0/sqrtf(x))
 +#        else
 +#            define gmx_invsqrt(x)     (1.0/sqrt(x))
 +#        endif
 +#    endif
 +#endif
 +
 +
 +static real sqr(real x)
 +{
 +  return (x*x);
 +}
 +
 +static gmx_inline double dsqr(double x)
 +{
 +  return (x*x);
 +}
 +
 +/* Maclaurin series for sinh(x)/x, useful for NH chains and MTTK pressure control 
 +   Here, we compute it to 10th order, which might be overkill, 8th is probably enough, 
 +   but it's not very much more expensive. */
 +
 +static gmx_inline real series_sinhx(real x) 
 +{
 +  real x2 = x*x;
 +  return (1 + (x2/6.0)*(1 + (x2/20.0)*(1 + (x2/42.0)*(1 + (x2/72.0)*(1 + (x2/110.0))))));
 +}
 +
 +void vecinvsqrt(real in[],real out[],int n);
 +/* Perform out[i]=1.0/sqrt(in[i]) for n elements */
 +
 +
 +void vecrecip(real in[],real out[],int n);
 +/* Perform out[i]=1.0/(in[i]) for n elements */
 +
 +/* Note: If you need a fast version of vecinvsqrt 
 + * and/or vecrecip, call detectcpu() and run the SSE/3DNow/SSE2/Altivec
 + * versions if your hardware supports it.
 + *
 + * To use those routines, your memory HAS TO BE CACHE-ALIGNED.
 + * Use snew_aligned(ptr,size,32) to allocate and sfree_aligned to free.
 + */
 +
 +
 +static gmx_inline void rvec_add(const rvec a,const rvec b,rvec c)
 +{
 +  real x,y,z;
 +  
 +  x=a[XX]+b[XX];
 +  y=a[YY]+b[YY];
 +  z=a[ZZ]+b[ZZ];
 +  
 +  c[XX]=x;
 +  c[YY]=y;
 +  c[ZZ]=z;
 +}
 +
 +static gmx_inline void dvec_add(const dvec a,const dvec b,dvec c)
 +{
 +  double x,y,z;
 +  
 +  x=a[XX]+b[XX];
 +  y=a[YY]+b[YY];
 +  z=a[ZZ]+b[ZZ];
 +  
 +  c[XX]=x;
 +  c[YY]=y;
 +  c[ZZ]=z;
 +}
 +
 +static gmx_inline void ivec_add(const ivec a,const ivec b,ivec c)
 +{
 +  int x,y,z;
 +  
 +  x=a[XX]+b[XX];
 +  y=a[YY]+b[YY];
 +  z=a[ZZ]+b[ZZ];
 +  
 +  c[XX]=x;
 +  c[YY]=y;
 +  c[ZZ]=z;
 +}
 +
 +static gmx_inline void rvec_inc(rvec a,const rvec b)
 +{
 +  real x,y,z;
 +  
 +  x=a[XX]+b[XX];
 +  y=a[YY]+b[YY];
 +  z=a[ZZ]+b[ZZ];
 +  
 +  a[XX]=x;
 +  a[YY]=y;
 +  a[ZZ]=z;
 +}
 +
 +static gmx_inline void dvec_inc(dvec a,const dvec b)
 +{
 +  double x,y,z;
 +
 +  x=a[XX]+b[XX];
 +  y=a[YY]+b[YY];
 +  z=a[ZZ]+b[ZZ];
 +
 +  a[XX]=x;
 +  a[YY]=y;
 +  a[ZZ]=z;
 +}
 +
 +static gmx_inline void rvec_sub(const rvec a,const rvec b,rvec c)
 +{
 +  real x,y,z;
 +  
 +  x=a[XX]-b[XX];
 +  y=a[YY]-b[YY];
 +  z=a[ZZ]-b[ZZ];
 +  
 +  c[XX]=x;
 +  c[YY]=y;
 +  c[ZZ]=z;
 +}
 +
 +static gmx_inline void dvec_sub(const dvec a,const dvec b,dvec c)
 +{
 +  double x,y,z;
 +  
 +  x=a[XX]-b[XX];
 +  y=a[YY]-b[YY];
 +  z=a[ZZ]-b[ZZ];
 +  
 +  c[XX]=x;
 +  c[YY]=y;
 +  c[ZZ]=z;
 +}
 +
 +static gmx_inline void rvec_dec(rvec a,const rvec b)
 +{
 +  real x,y,z;
 +  
 +  x=a[XX]-b[XX];
 +  y=a[YY]-b[YY];
 +  z=a[ZZ]-b[ZZ];
 +  
 +  a[XX]=x;
 +  a[YY]=y;
 +  a[ZZ]=z;
 +}
 +
 +static gmx_inline void copy_rvec(const rvec a,rvec b)
 +{
 +  b[XX]=a[XX];
 +  b[YY]=a[YY];
 +  b[ZZ]=a[ZZ];
 +}
 +
 +static gmx_inline void copy_rvecn(rvec *a,rvec *b,int startn, int endn)
 +{
 +  int i;
 +  for (i=startn;i<endn;i++) {
 +    b[i][XX]=a[i][XX];
 +    b[i][YY]=a[i][YY];
 +    b[i][ZZ]=a[i][ZZ];
 +  }
 +}
 +
 +static gmx_inline void copy_dvec(const dvec a,dvec b)
 +{
 +  b[XX]=a[XX];
 +  b[YY]=a[YY];
 +  b[ZZ]=a[ZZ];
 +}
 +
 +static gmx_inline void copy_ivec(const ivec a,ivec b)
 +{
 +  b[XX]=a[XX];
 +  b[YY]=a[YY];
 +  b[ZZ]=a[ZZ];
 +}
 +
 +static gmx_inline void ivec_sub(const ivec a,const ivec b,ivec c)
 +{
 +  int x,y,z;
 +  
 +  x=a[XX]-b[XX];
 +  y=a[YY]-b[YY];
 +  z=a[ZZ]-b[ZZ];
 +  
 +  c[XX]=x;
 +  c[YY]=y;
 +  c[ZZ]=z;
 +}
 +
 +static gmx_inline void copy_mat(matrix a,matrix b)
 +{
 +  copy_rvec(a[XX],b[XX]);
 +  copy_rvec(a[YY],b[YY]);
 +  copy_rvec(a[ZZ],b[ZZ]);
 +}
 +
 +static gmx_inline void svmul(real a,const rvec v1,rvec v2)
 +{
 +  v2[XX]=a*v1[XX];
 +  v2[YY]=a*v1[YY];
 +  v2[ZZ]=a*v1[ZZ];
 +}
 +
 +static gmx_inline void dsvmul(double a,const dvec v1,dvec v2)
 +{
 +  v2[XX]=a*v1[XX];
 +  v2[YY]=a*v1[YY];
 +  v2[ZZ]=a*v1[ZZ];
 +}
 +
 +static gmx_inline real distance2(const rvec v1,const rvec v2)
 +{
 +  return sqr(v2[XX]-v1[XX]) + sqr(v2[YY]-v1[YY]) + sqr(v2[ZZ]-v1[ZZ]);
 +}
 +
 +static gmx_inline void clear_rvec(rvec a)
 +{
 +  /* The ibm compiler has problems with inlining this 
 +   * when we use a const real variable
 +   */
 +  a[XX]=0.0;
 +  a[YY]=0.0;
 +  a[ZZ]=0.0;
 +}
 +
 +static gmx_inline void clear_dvec(dvec a)
 +{
 +  /* The ibm compiler has problems with inlining this 
 +   * when we use a const real variable
 +   */
 +  a[XX]=0.0;
 +  a[YY]=0.0;
 +  a[ZZ]=0.0;
 +}
 +
 +static gmx_inline void clear_ivec(ivec a)
 +{
 +  a[XX]=0;
 +  a[YY]=0;
 +  a[ZZ]=0;
 +}
 +
 +static gmx_inline void clear_rvecs(int n,rvec v[])
 +{
 +/*  memset(v[0],0,DIM*n*sizeof(v[0][0])); */
 +  int i;
 +    
 +  for(i=0; (i<n); i++) 
 +    clear_rvec(v[i]);
 +}
 +
 +static gmx_inline void clear_mat(matrix a)
 +{
 +/*  memset(a[0],0,DIM*DIM*sizeof(a[0][0])); */
 +  
 +  const real nul=0.0;
 +  
 +  a[XX][XX]=a[XX][YY]=a[XX][ZZ]=nul;
 +  a[YY][XX]=a[YY][YY]=a[YY][ZZ]=nul;
 +  a[ZZ][XX]=a[ZZ][YY]=a[ZZ][ZZ]=nul;
 +}
 +
 +static gmx_inline real iprod(const rvec a,const rvec b)
 +{
 +  return (a[XX]*b[XX]+a[YY]*b[YY]+a[ZZ]*b[ZZ]);
 +}
 +
 +static gmx_inline double diprod(const dvec a,const dvec b)
 +{
 +  return (a[XX]*b[XX]+a[YY]*b[YY]+a[ZZ]*b[ZZ]);
 +}
 +
 +static gmx_inline int iiprod(const ivec a,const ivec b)
 +{
 +  return (a[XX]*b[XX]+a[YY]*b[YY]+a[ZZ]*b[ZZ]);
 +}
 +
 +static gmx_inline real norm2(const rvec a)
 +{
 +  return a[XX]*a[XX]+a[YY]*a[YY]+a[ZZ]*a[ZZ];
 +}
 +
 +static gmx_inline double dnorm2(const dvec a)
 +{
 +  return a[XX]*a[XX]+a[YY]*a[YY]+a[ZZ]*a[ZZ];
 +}
 +
 +/* WARNING:
 + * As dnorm() uses sqrt() (which is slow) _only_ use it if you are sure you
 + * don't need 1/dnorm(), otherwise use dnorm2()*dinvnorm(). */
 +static gmx_inline double dnorm(const dvec a)
 +{
 +  return sqrt(diprod(a, a));
 +}
 +
 +/* WARNING:
 + * As norm() uses sqrtf() (which is slow) _only_ use it if you are sure you
 + * don't need 1/norm(), otherwise use norm2()*invnorm(). */
 +static gmx_inline real norm(const rvec a)
 +{
 +  /* This is ugly, but we deliberately do not define gmx_sqrt() and handle the
 +   * float/double case here instead to avoid gmx_sqrt() being accidentally used. */
 +#ifdef GMX_DOUBLE
 +  return dnorm(a);
 +#elif defined HAVE_SQRTF
 +  return sqrtf(iprod(a, a));
 +#else
 +  return sqrt(iprod(a, a));
 +#endif
 +}
 +
 +static gmx_inline real invnorm(const rvec a)
 +{
 +    return gmx_invsqrt(norm2(a));
 +}
 +
 +static gmx_inline real dinvnorm(const dvec a)
 +{
 +    return gmx_invsqrt(dnorm2(a));
 +}
 +
 +/* WARNING:
 + * Do _not_ use these routines to calculate the angle between two vectors
 + * as acos(cos_angle(u,v)). While it might seem obvious, the acos function
 + * is very flat close to -1 and 1, which will lead to accuracy-loss.
 + * Instead, use the new gmx_angle() function directly.
 + */
 +static gmx_inline real 
 +cos_angle(const rvec a,const rvec b)
 +{
 +  /* 
 +   *                  ax*bx + ay*by + az*bz
 +   * cos-vec (a,b) =  ---------------------
 +   *                      ||a|| * ||b||
 +   */
 +  real   cosval;
 +  int    m;
 +  double aa,bb,ip,ipa,ipb,ipab; /* For accuracy these must be double! */
 +  
 +  ip=ipa=ipb=0.0;
 +  for(m=0; (m<DIM); m++) {            /* 18           */
 +    aa   = a[m];
 +    bb   = b[m];
 +    ip  += aa*bb;
 +    ipa += aa*aa;
 +    ipb += bb*bb;
 +  }
 +  ipab = ipa*ipb;
 +  if (ipab > 0)
 +    cosval = ip*gmx_invsqrt(ipab);            /*  7           */
 +  else 
 +    cosval = 1;
 +                                      /* 25 TOTAL     */
 +  if (cosval > 1.0) 
 +    return  1.0; 
 +  if (cosval <-1.0) 
 +    return -1.0;
 +  
 +  return cosval;
 +}
 +
 +/* WARNING:
 + * Do _not_ use these routines to calculate the angle between two vectors
 + * as acos(cos_angle(u,v)). While it might seem obvious, the acos function
 + * is very flat close to -1 and 1, which will lead to accuracy-loss.
 + * Instead, use the new gmx_angle() function directly.
 + */
 +static gmx_inline real 
 +cos_angle_no_table(const rvec a,const rvec b)
 +{
 +  /* This version does not need the invsqrt lookup table */
 +  real   cosval;
 +  int    m;
 +  double aa,bb,ip,ipa,ipb; /* For accuracy these must be double! */
 +  
 +  ip=ipa=ipb=0.0;
 +  for(m=0; (m<DIM); m++) {            /* 18           */
 +    aa   = a[m];
 +    bb   = b[m];
 +    ip  += aa*bb;
 +    ipa += aa*aa;
 +    ipb += bb*bb;
 +  }
 +  cosval=ip/sqrt(ipa*ipb);            /* 12           */
 +                                      /* 30 TOTAL     */
 +  if (cosval > 1.0) 
 +    return  1.0; 
 +  if (cosval <-1.0) 
 +    return -1.0;
 +  
 +  return cosval;
 +}
 +
 +
 +static gmx_inline void cprod(const rvec a,const rvec b,rvec c)
 +{
 +  c[XX]=a[YY]*b[ZZ]-a[ZZ]*b[YY];
 +  c[YY]=a[ZZ]*b[XX]-a[XX]*b[ZZ];
 +  c[ZZ]=a[XX]*b[YY]-a[YY]*b[XX];
 +}
 +
 +static gmx_inline void dcprod(const dvec a,const dvec b,dvec c)
 +{
 +  c[XX]=a[YY]*b[ZZ]-a[ZZ]*b[YY];
 +  c[YY]=a[ZZ]*b[XX]-a[XX]*b[ZZ];
 +  c[ZZ]=a[XX]*b[YY]-a[YY]*b[XX];
 +}
 +
 +/* This routine calculates the angle between a & b without any loss of accuracy close to 0/PI.
 + * If you only need cos(theta), use the cos_angle() routines to save a few cycles.
 + * This routine is faster than it might appear, since atan2 is accelerated on many CPUs (e.g. x86).
 + */
 +static gmx_inline real 
 +gmx_angle(const rvec a, const rvec b)
 +{
 +    rvec w;
 +    real wlen,s;
 +    
 +    cprod(a,b,w);
 +    
 +    wlen  = norm(w);
 +    s     = iprod(a,b);
 +    
 +    return atan2(wlen,s);
 +}
 +
 +static gmx_inline void mmul_ur0(matrix a,matrix b,matrix dest)
 +{
 +  dest[XX][XX]=a[XX][XX]*b[XX][XX];
 +  dest[XX][YY]=0.0;
 +  dest[XX][ZZ]=0.0;
 +  dest[YY][XX]=a[YY][XX]*b[XX][XX]+a[YY][YY]*b[YY][XX];
 +  dest[YY][YY]=                    a[YY][YY]*b[YY][YY];
 +  dest[YY][ZZ]=0.0;
 +  dest[ZZ][XX]=a[ZZ][XX]*b[XX][XX]+a[ZZ][YY]*b[YY][XX]+a[ZZ][ZZ]*b[ZZ][XX];
 +  dest[ZZ][YY]=                    a[ZZ][YY]*b[YY][YY]+a[ZZ][ZZ]*b[ZZ][YY];
 +  dest[ZZ][ZZ]=                                        a[ZZ][ZZ]*b[ZZ][ZZ];
 +}
 +
 +static gmx_inline void mmul(matrix a,matrix b,matrix dest)
 +{
 +  dest[XX][XX]=a[XX][XX]*b[XX][XX]+a[XX][YY]*b[YY][XX]+a[XX][ZZ]*b[ZZ][XX];
 +  dest[YY][XX]=a[YY][XX]*b[XX][XX]+a[YY][YY]*b[YY][XX]+a[YY][ZZ]*b[ZZ][XX];
 +  dest[ZZ][XX]=a[ZZ][XX]*b[XX][XX]+a[ZZ][YY]*b[YY][XX]+a[ZZ][ZZ]*b[ZZ][XX];
 +  dest[XX][YY]=a[XX][XX]*b[XX][YY]+a[XX][YY]*b[YY][YY]+a[XX][ZZ]*b[ZZ][YY];
 +  dest[YY][YY]=a[YY][XX]*b[XX][YY]+a[YY][YY]*b[YY][YY]+a[YY][ZZ]*b[ZZ][YY];
 +  dest[ZZ][YY]=a[ZZ][XX]*b[XX][YY]+a[ZZ][YY]*b[YY][YY]+a[ZZ][ZZ]*b[ZZ][YY];
 +  dest[XX][ZZ]=a[XX][XX]*b[XX][ZZ]+a[XX][YY]*b[YY][ZZ]+a[XX][ZZ]*b[ZZ][ZZ];
 +  dest[YY][ZZ]=a[YY][XX]*b[XX][ZZ]+a[YY][YY]*b[YY][ZZ]+a[YY][ZZ]*b[ZZ][ZZ];
 +  dest[ZZ][ZZ]=a[ZZ][XX]*b[XX][ZZ]+a[ZZ][YY]*b[YY][ZZ]+a[ZZ][ZZ]*b[ZZ][ZZ];
 +}
 +
 +static gmx_inline void transpose(matrix src,matrix dest)
 +{
 +  dest[XX][XX]=src[XX][XX];
 +  dest[YY][XX]=src[XX][YY];
 +  dest[ZZ][XX]=src[XX][ZZ];
 +  dest[XX][YY]=src[YY][XX];
 +  dest[YY][YY]=src[YY][YY];
 +  dest[ZZ][YY]=src[YY][ZZ];
 +  dest[XX][ZZ]=src[ZZ][XX];
 +  dest[YY][ZZ]=src[ZZ][YY];
 +  dest[ZZ][ZZ]=src[ZZ][ZZ];
 +}
 +
 +static gmx_inline void tmmul(matrix a,matrix b,matrix dest)
 +{
 +  /* Computes dest=mmul(transpose(a),b,dest) - used in do_pr_pcoupl */
 +  dest[XX][XX]=a[XX][XX]*b[XX][XX]+a[YY][XX]*b[YY][XX]+a[ZZ][XX]*b[ZZ][XX];
 +  dest[XX][YY]=a[XX][XX]*b[XX][YY]+a[YY][XX]*b[YY][YY]+a[ZZ][XX]*b[ZZ][YY];
 +  dest[XX][ZZ]=a[XX][XX]*b[XX][ZZ]+a[YY][XX]*b[YY][ZZ]+a[ZZ][XX]*b[ZZ][ZZ];
 +  dest[YY][XX]=a[XX][YY]*b[XX][XX]+a[YY][YY]*b[YY][XX]+a[ZZ][YY]*b[ZZ][XX];
 +  dest[YY][YY]=a[XX][YY]*b[XX][YY]+a[YY][YY]*b[YY][YY]+a[ZZ][YY]*b[ZZ][YY];
 +  dest[YY][ZZ]=a[XX][YY]*b[XX][ZZ]+a[YY][YY]*b[YY][ZZ]+a[ZZ][YY]*b[ZZ][ZZ];
 +  dest[ZZ][XX]=a[XX][ZZ]*b[XX][XX]+a[YY][ZZ]*b[YY][XX]+a[ZZ][ZZ]*b[ZZ][XX];
 +  dest[ZZ][YY]=a[XX][ZZ]*b[XX][YY]+a[YY][ZZ]*b[YY][YY]+a[ZZ][ZZ]*b[ZZ][YY];
 +  dest[ZZ][ZZ]=a[XX][ZZ]*b[XX][ZZ]+a[YY][ZZ]*b[YY][ZZ]+a[ZZ][ZZ]*b[ZZ][ZZ];
 +}
 +
 +static gmx_inline void mtmul(matrix a,matrix b,matrix dest)
 +{
 +  /* Computes dest=mmul(a,transpose(b),dest) - used in do_pr_pcoupl */
 +  dest[XX][XX]=a[XX][XX]*b[XX][XX]+a[XX][YY]*b[XX][YY]+a[XX][ZZ]*b[XX][ZZ];
 +  dest[XX][YY]=a[XX][XX]*b[YY][XX]+a[XX][YY]*b[YY][YY]+a[XX][ZZ]*b[YY][ZZ];
 +  dest[XX][ZZ]=a[XX][XX]*b[ZZ][XX]+a[XX][YY]*b[ZZ][YY]+a[XX][ZZ]*b[ZZ][ZZ];
 +  dest[YY][XX]=a[YY][XX]*b[XX][XX]+a[YY][YY]*b[XX][YY]+a[YY][ZZ]*b[XX][ZZ];
 +  dest[YY][YY]=a[YY][XX]*b[YY][XX]+a[YY][YY]*b[YY][YY]+a[YY][ZZ]*b[YY][ZZ];
 +  dest[YY][ZZ]=a[YY][XX]*b[ZZ][XX]+a[YY][YY]*b[ZZ][YY]+a[YY][ZZ]*b[ZZ][ZZ];
 +  dest[ZZ][XX]=a[ZZ][XX]*b[XX][XX]+a[ZZ][YY]*b[XX][YY]+a[ZZ][ZZ]*b[XX][ZZ];
 +  dest[ZZ][YY]=a[ZZ][XX]*b[YY][XX]+a[ZZ][YY]*b[YY][YY]+a[ZZ][ZZ]*b[YY][ZZ];
 +  dest[ZZ][ZZ]=a[ZZ][XX]*b[ZZ][XX]+a[ZZ][YY]*b[ZZ][YY]+a[ZZ][ZZ]*b[ZZ][ZZ];
 +}
 +
 +static gmx_inline real det(matrix a)
 +{
 +  return ( a[XX][XX]*(a[YY][YY]*a[ZZ][ZZ]-a[ZZ][YY]*a[YY][ZZ])
 +        -a[YY][XX]*(a[XX][YY]*a[ZZ][ZZ]-a[ZZ][YY]*a[XX][ZZ])
 +        +a[ZZ][XX]*(a[XX][YY]*a[YY][ZZ]-a[YY][YY]*a[XX][ZZ]));
 +}
 +
 +static gmx_inline void m_add(matrix a,matrix b,matrix dest)
 +{
 +  dest[XX][XX]=a[XX][XX]+b[XX][XX];
 +  dest[XX][YY]=a[XX][YY]+b[XX][YY];
 +  dest[XX][ZZ]=a[XX][ZZ]+b[XX][ZZ];
 +  dest[YY][XX]=a[YY][XX]+b[YY][XX];
 +  dest[YY][YY]=a[YY][YY]+b[YY][YY];
 +  dest[YY][ZZ]=a[YY][ZZ]+b[YY][ZZ];
 +  dest[ZZ][XX]=a[ZZ][XX]+b[ZZ][XX];
 +  dest[ZZ][YY]=a[ZZ][YY]+b[ZZ][YY];
 +  dest[ZZ][ZZ]=a[ZZ][ZZ]+b[ZZ][ZZ];
 +}
 +
 +static gmx_inline void m_sub(matrix a,matrix b,matrix dest)
 +{
 +  dest[XX][XX]=a[XX][XX]-b[XX][XX];
 +  dest[XX][YY]=a[XX][YY]-b[XX][YY];
 +  dest[XX][ZZ]=a[XX][ZZ]-b[XX][ZZ];
 +  dest[YY][XX]=a[YY][XX]-b[YY][XX];
 +  dest[YY][YY]=a[YY][YY]-b[YY][YY];
 +  dest[YY][ZZ]=a[YY][ZZ]-b[YY][ZZ];
 +  dest[ZZ][XX]=a[ZZ][XX]-b[ZZ][XX];
 +  dest[ZZ][YY]=a[ZZ][YY]-b[ZZ][YY];
 +  dest[ZZ][ZZ]=a[ZZ][ZZ]-b[ZZ][ZZ];
 +}
 +
 +static gmx_inline void msmul(matrix m1,real r1,matrix dest)
 +{
 +  dest[XX][XX]=r1*m1[XX][XX];
 +  dest[XX][YY]=r1*m1[XX][YY];
 +  dest[XX][ZZ]=r1*m1[XX][ZZ];
 +  dest[YY][XX]=r1*m1[YY][XX];
 +  dest[YY][YY]=r1*m1[YY][YY];
 +  dest[YY][ZZ]=r1*m1[YY][ZZ];
 +  dest[ZZ][XX]=r1*m1[ZZ][XX];
 +  dest[ZZ][YY]=r1*m1[ZZ][YY];
 +  dest[ZZ][ZZ]=r1*m1[ZZ][ZZ];
 +}
 +
 +static gmx_inline void m_inv_ur0(matrix src,matrix dest)
 +{
 +  double tmp = src[XX][XX]*src[YY][YY]*src[ZZ][ZZ];
 +  if (fabs(tmp) <= 100*GMX_REAL_MIN)
 +    gmx_fatal(FARGS,"Can not invert matrix, determinant is zero");
 +
 +  dest[XX][XX] = 1/src[XX][XX];
 +  dest[YY][YY] = 1/src[YY][YY];
 +  dest[ZZ][ZZ] = 1/src[ZZ][ZZ];
 +  dest[ZZ][XX] = (src[YY][XX]*src[ZZ][YY]*dest[YY][YY]
 +                - src[ZZ][XX])*dest[XX][XX]*dest[ZZ][ZZ];
 +  dest[YY][XX] = -src[YY][XX]*dest[XX][XX]*dest[YY][YY];
 +  dest[ZZ][YY] = -src[ZZ][YY]*dest[YY][YY]*dest[ZZ][ZZ];
 +  dest[XX][YY] = 0.0;
 +  dest[XX][ZZ] = 0.0;
 +  dest[YY][ZZ] = 0.0;
 +}
 +
 +static gmx_inline void m_inv(matrix src,matrix dest)
 +{
 +  const real smallreal = (real)1.0e-24;
 +  const real largereal = (real)1.0e24;
 +  real  deter,c,fc;
 +
 +  deter = det(src);
 +  c     = (real)1.0/deter;
 +  fc    = (real)fabs(c);
 +  
 +  if ((fc <= smallreal) || (fc >= largereal)) 
 +    gmx_fatal(FARGS,"Can not invert matrix, determinant = %e",deter);
 +
 +  dest[XX][XX]= c*(src[YY][YY]*src[ZZ][ZZ]-src[ZZ][YY]*src[YY][ZZ]);
 +  dest[XX][YY]=-c*(src[XX][YY]*src[ZZ][ZZ]-src[ZZ][YY]*src[XX][ZZ]);
 +  dest[XX][ZZ]= c*(src[XX][YY]*src[YY][ZZ]-src[YY][YY]*src[XX][ZZ]);
 +  dest[YY][XX]=-c*(src[YY][XX]*src[ZZ][ZZ]-src[ZZ][XX]*src[YY][ZZ]);
 +  dest[YY][YY]= c*(src[XX][XX]*src[ZZ][ZZ]-src[ZZ][XX]*src[XX][ZZ]);
 +  dest[YY][ZZ]=-c*(src[XX][XX]*src[YY][ZZ]-src[YY][XX]*src[XX][ZZ]);
 +  dest[ZZ][XX]= c*(src[YY][XX]*src[ZZ][YY]-src[ZZ][XX]*src[YY][YY]);
 +  dest[ZZ][YY]=-c*(src[XX][XX]*src[ZZ][YY]-src[ZZ][XX]*src[XX][YY]);
 +  dest[ZZ][ZZ]= c*(src[XX][XX]*src[YY][YY]-src[YY][XX]*src[XX][YY]);
 +}
 +
 +static gmx_inline void mvmul(matrix a,const rvec src,rvec dest)
 +{
 +  dest[XX]=a[XX][XX]*src[XX]+a[XX][YY]*src[YY]+a[XX][ZZ]*src[ZZ];
 +  dest[YY]=a[YY][XX]*src[XX]+a[YY][YY]*src[YY]+a[YY][ZZ]*src[ZZ];
 +  dest[ZZ]=a[ZZ][XX]*src[XX]+a[ZZ][YY]*src[YY]+a[ZZ][ZZ]*src[ZZ];
 +}
 +
 +static gmx_inline void mvmul_ur0(matrix a,const rvec src,rvec dest)
 +{
 +  dest[ZZ]=a[ZZ][XX]*src[XX]+a[ZZ][YY]*src[YY]+a[ZZ][ZZ]*src[ZZ];
 +  dest[YY]=a[YY][XX]*src[XX]+a[YY][YY]*src[YY];
 +  dest[XX]=a[XX][XX]*src[XX];
 +}
 +
 +static gmx_inline void tmvmul_ur0(matrix a,const rvec src,rvec dest)
 +{
 +  dest[XX]=a[XX][XX]*src[XX]+a[YY][XX]*src[YY]+a[ZZ][XX]*src[ZZ];
 +  dest[YY]=                  a[YY][YY]*src[YY]+a[ZZ][YY]*src[ZZ];
 +  dest[ZZ]=                                    a[ZZ][ZZ]*src[ZZ];
 +}
 +
 +static gmx_inline void unitv(const rvec src,rvec dest)
 +{
 +  real linv;
 +  
 +  linv=gmx_invsqrt(norm2(src));
 +  dest[XX]=linv*src[XX];
 +  dest[YY]=linv*src[YY];
 +  dest[ZZ]=linv*src[ZZ];
 +}
 +
 +static gmx_inline void unitv_no_table(const rvec src,rvec dest)
 +{
 +  real linv;
 +  
 +  linv=1.0/sqrt(norm2(src));
 +  dest[XX]=linv*src[XX];
 +  dest[YY]=linv*src[YY];
 +  dest[ZZ]=linv*src[ZZ];
 +}
 +
 +static void calc_lll(rvec box,rvec lll)
 +{
 +  lll[XX] = 2.0*M_PI/box[XX];
 +  lll[YY] = 2.0*M_PI/box[YY];
 +  lll[ZZ] = 2.0*M_PI/box[ZZ];
 +}
 +
 +static gmx_inline real trace(matrix m)
 +{
 +  return (m[XX][XX]+m[YY][YY]+m[ZZ][ZZ]);
 +}
 +
 +static gmx_inline real _divide_err(real a,real b,const char *file,int line)
 +{
 +    if (fabs(b) <= GMX_REAL_MIN) 
 +        gmx_fatal(FARGS,"Dividing by zero, file %s, line %d",file,line);
 +    return a/b;
 +}
 +
 +static gmx_inline int _mod(int a,int b,char *file,int line)
 +{
 +  if(b==0)
 +    gmx_fatal(FARGS,"Modulo zero, file %s, line %d",file,line);
 +  return a % b;
 +}
 +
 +/* Operations on multidimensional rvecs, used e.g. in edsam.c */
 +static void m_rveccopy(int dim, rvec *a, rvec *b)
 +{
 +    /* b = a */
 +    int i;
 +
 +    for (i=0; i<dim; i++)
 +        copy_rvec(a[i],b[i]);
 +} 
 +
 +/*computer matrix vectors from base vectors and angles */
 +static void matrix_convert(matrix box, rvec vec, rvec angle)
 +{
 +    svmul(DEG2RAD,angle,angle);
 +    box[XX][XX] = vec[XX];
 +    box[YY][XX] = vec[YY]*cos(angle[ZZ]);
 +    box[YY][YY] = vec[YY]*sin(angle[ZZ]);
 +    box[ZZ][XX] = vec[ZZ]*cos(angle[YY]);
 +    box[ZZ][YY] = vec[ZZ]
 +                         *(cos(angle[XX])-cos(angle[YY])*cos(angle[ZZ]))/sin(angle[ZZ]);
 +    box[ZZ][ZZ] = sqrt(sqr(vec[ZZ])
 +                       -box[ZZ][XX]*box[ZZ][XX]-box[ZZ][YY]*box[ZZ][YY]);
 +}
 +
 +#define divide_err(a,b) _divide_err((a),(b),__FILE__,__LINE__)
 +#define mod(a,b)    _mod((a),(b),__FILE__,__LINE__)
 +
 +#ifdef __cplusplus
 +}
 +#endif
 +
 +
 +#endif        /* _vec_h */
index 366dda2d545d1f8251bbfc40fdf480c7399d9b28,0000000000000000000000000000000000000000..d059a34767e004920a3d2a799b1a20f22863dfd9
mode 100644,000000..100644
--- /dev/null
@@@ -1,12 -1,0 +1,12 @@@
- Libs.private: @CMAKE_THREAD_LIBS_INIT@ @PKG_DL_LIBS@
 +libdir=@LIB_INSTALL_DIR@
 +includedir=@INCL_INSTALL_DIR@
 +
 +Name: libgromacs
 +Description: Gromacs library
 +URL: http://www.gromacs.org
 +Version: @PROJECT_VERSION@
 +Requires: @PKG_FFT@ @PKG_XML@
++Libs.private: @CMAKE_THREAD_LIBS_INIT@ @PKG_DL_LIBS@ @OpenMP_LINKER_FLAGS@
 +Libs: -L${libdir} -lgromacs@GMX_LIBS_SUFFIX@ @PKG_FFT_LIBS@ -lm
 +Cflags: -I${includedir} @PKG_CFLAGS@
 +
index 519f5353251c4d2e003f76ada13a70171a4b4807,0000000000000000000000000000000000000000..d25b81ec362d35f831ffab2c0e3d8cabe5af547a
mode 100644,000000..100644
--- /dev/null
@@@ -1,1376 -1,0 +1,1376 @@@
-             do_edsam(ir,step,md,cr,xprime,v,box,constr->ed);
 +/* -*- mode: c; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4; c-file-style: "stroustrup"; -*-
 + *
 + * 
 + *                This source code is part of
 + * 
 + *                 G   R   O   M   A   C   S
 + * 
 + *          GROningen MAchine for Chemical Simulations
 + * 
 + *                        VERSION 3.2.0
 + * Written by David van der Spoel, Erik Lindahl, Berk Hess, and others.
 + * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 + * Copyright (c) 2001-2004, The GROMACS development team,
 + * check out http://www.gromacs.org for more information.
 +
 + * This program is free software; you can redistribute it and/or
 + * modify it under the terms of the GNU General Public License
 + * as published by the Free Software Foundation; either version 2
 + * of the License, or (at your option) any later version.
 + * 
 + * If you want to redistribute modifications, please consider that
 + * scientific software is very special. Version control is crucial -
 + * bugs must be traceable. We will be happy to consider code for
 + * inclusion in the official distribution, but derived work must not
 + * be called official GROMACS. Details are found in the README & COPYING
 + * files - if they are missing, get the official version at www.gromacs.org.
 + * 
 + * To help us fund GROMACS development, we humbly ask that you cite
 + * the papers on the package - you can find them in the top README file.
 + * 
 + * For more info, check our website at http://www.gromacs.org
 + * 
 + * And Hey:
 + * GROwing Monsters And Cloning Shrimps
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include "confio.h"
 +#include "constr.h"
 +#include "copyrite.h"
 +#include "invblock.h"
 +#include "main.h"
 +#include "mdrun.h"
 +#include "nrnb.h"
 +#include "smalloc.h"
 +#include "vec.h"
 +#include "physics.h"
 +#include "names.h"
 +#include "txtdump.h"
 +#include "domdec.h"
 +#include "pdbio.h"
 +#include "partdec.h"
 +#include "splitter.h"
 +#include "mtop_util.h"
 +#include "gmxfio.h"
 +#include "macros.h"
 +#include "gmx_omp_nthreads.h"
 +
 +typedef struct gmx_constr {
 +  int              ncon_tot;     /* The total number of constraints    */
 +  int              nflexcon;     /* The number of flexible constraints */
 +  int              n_at2con_mt;  /* The size of at2con = #moltypes     */
 +  t_blocka         *at2con_mt;   /* A list of atoms to constraints     */
 +  int              n_at2settle_mt; /* The size of at2settle = #moltypes  */
 +  int              **at2settle_mt; /* A list of atoms to settles         */
 +  gmx_bool         bInterCGsettles;
 +  gmx_lincsdata_t  lincsd;       /* LINCS data                         */
 +  gmx_shakedata_t  shaked;       /* SHAKE data                         */
 +  gmx_settledata_t settled;      /* SETTLE data                        */
 +  int              nblocks;      /* The number of SHAKE blocks         */
 +  int              *sblock;      /* The SHAKE blocks                   */
 +  int              sblock_nalloc;/* The allocation size of sblock      */
 +  real             *lagr;        /* Lagrange multipliers for SHAKE     */
 +  int              lagr_nalloc;  /* The allocation size of lagr        */
 +  int              maxwarn;      /* The maximum number of warnings     */
 +  int              warncount_lincs;
 +  int              warncount_settle;
 +  gmx_edsam_t      ed;           /* The essential dynamics data        */
 +
 +    tensor           *vir_r_m_dr_th;/* Thread local working data          */
 +    int              *settle_error; /* Thread local working data          */
 +
 +  gmx_mtop_t       *warn_mtop;   /* Only used for printing warnings    */
 +} t_gmx_constr;
 +
 +typedef struct {
 +  atom_id iatom[3];
 +  atom_id blocknr;
 +} t_sortblock;
 +
 +static void *init_vetavars(t_vetavars *vars,
 +                           gmx_bool constr_deriv,
 +                           real veta,real vetanew, t_inputrec *ir, gmx_ekindata_t *ekind, gmx_bool bPscal) 
 +{
 +    double g;
 +    int i;
 +
 +    /* first, set the alpha integrator variable */
 +    if ((ir->opts.nrdf[0] > 0) && bPscal) 
 +    {
 +        vars->alpha = 1.0 + DIM/((double)ir->opts.nrdf[0]);  
 +    } else {
 +        vars->alpha = 1.0;
 +    }
 +    g = 0.5*veta*ir->delta_t;
 +    vars->rscale = exp(g)*series_sinhx(g);
 +    g = -0.25*vars->alpha*veta*ir->delta_t;
 +    vars->vscale = exp(g)*series_sinhx(g);
 +    vars->rvscale = vars->vscale*vars->rscale;
 +    vars->veta = vetanew;
 +
 +    if (constr_deriv)
 +    {
 +        snew(vars->vscale_nhc,ir->opts.ngtc);
 +        if ((ekind==NULL) || (!bPscal))
 +        {
 +            for (i=0;i<ir->opts.ngtc;i++)
 +            {
 +                vars->vscale_nhc[i] = 1;
 +            }
 +        }
 +        else
 +        {
 +            for (i=0;i<ir->opts.ngtc;i++)
 +            {
 +                vars->vscale_nhc[i] = ekind->tcstat[i].vscale_nhc;
 +            }
 +        }
 +    }
 +    else
 +    {
 +        vars->vscale_nhc = NULL;
 +    }
 +
 +    return vars;
 +}
 +
 +static void free_vetavars(t_vetavars *vars) 
 +{
 +    if (vars->vscale_nhc != NULL)
 +    {
 +        sfree(vars->vscale_nhc);
 +    }
 +}
 +
 +static int pcomp(const void *p1, const void *p2)
 +{
 +  int     db;
 +  atom_id min1,min2,max1,max2;
 +  t_sortblock *a1=(t_sortblock *)p1;
 +  t_sortblock *a2=(t_sortblock *)p2;
 +  
 +  db=a1->blocknr-a2->blocknr;
 +  
 +  if (db != 0)
 +    return db;
 +    
 +  min1=min(a1->iatom[1],a1->iatom[2]);
 +  max1=max(a1->iatom[1],a1->iatom[2]);
 +  min2=min(a2->iatom[1],a2->iatom[2]);
 +  max2=max(a2->iatom[1],a2->iatom[2]);
 +  
 +  if (min1 == min2)
 +    return max1-max2;
 +  else
 +    return min1-min2;
 +}
 +
 +int n_flexible_constraints(struct gmx_constr *constr)
 +{
 +  int nflexcon;
 +
 +  if (constr)
 +    nflexcon = constr->nflexcon;
 +  else
 +    nflexcon = 0;
 +
 +  return nflexcon;
 +}
 +
 +void too_many_constraint_warnings(int eConstrAlg,int warncount)
 +{
 +  const char *abort="- aborting to avoid logfile runaway.\n"
 +    "This normally happens when your system is not sufficiently equilibrated,"
 +    "or if you are changing lambda too fast in free energy simulations.\n";
 +  
 +  gmx_fatal(FARGS,
 +          "Too many %s warnings (%d)\n"
 +          "If you know what you are doing you can %s"
 +          "set the environment variable GMX_MAXCONSTRWARN to -1,\n"
 +          "but normally it is better to fix the problem",
 +          (eConstrAlg == econtLINCS) ? "LINCS" : "SETTLE",warncount,
 +          (eConstrAlg == econtLINCS) ?
 +          "adjust the lincs warning threshold in your mdp file\nor " : "\n");
 +}
 +
 +static void write_constr_pdb(const char *fn,const char *title,
 +                             gmx_mtop_t *mtop,
 +                             int start,int homenr,t_commrec *cr,
 +                             rvec x[],matrix box)
 +{
 +    char fname[STRLEN],format[STRLEN];
 +    FILE *out;
 +    int  dd_ac0=0,dd_ac1=0,i,ii,resnr;
 +    gmx_domdec_t *dd;
 +    char *anm,*resnm;
 +  
 +    dd = NULL;
 +    if (PAR(cr))
 +    {
 +        sprintf(fname,"%s_n%d.pdb",fn,cr->sim_nodeid);
 +        if (DOMAINDECOMP(cr))
 +        {
 +            dd = cr->dd;
 +            dd_get_constraint_range(dd,&dd_ac0,&dd_ac1);
 +            start = 0;
 +            homenr = dd_ac1;
 +        }
 +    }
 +    else
 +    {
 +        sprintf(fname,"%s.pdb",fn);
 +    }
 +    sprintf(format,"%s\n",get_pdbformat());
 +    
 +    out = gmx_fio_fopen(fname,"w");
 +    
 +    fprintf(out,"TITLE     %s\n",title);
 +    gmx_write_pdb_box(out,-1,box);
 +    for(i=start; i<start+homenr; i++)
 +    {
 +        if (dd != NULL)
 +        {
 +            if (i >= dd->nat_home && i < dd_ac0)
 +            {
 +                continue;
 +            }
 +            ii = dd->gatindex[i];
 +        }
 +        else
 +        {
 +            ii = i;
 +        }
 +        gmx_mtop_atominfo_global(mtop,ii,&anm,&resnr,&resnm);
 +        fprintf(out,format,"ATOM",(ii+1)%100000,
 +                anm,resnm,' ',resnr%10000,' ',
 +                10*x[i][XX],10*x[i][YY],10*x[i][ZZ]);
 +    }
 +    fprintf(out,"TER\n");
 +
 +    gmx_fio_fclose(out);
 +}
 +                           
 +static void dump_confs(FILE *fplog,gmx_large_int_t step,gmx_mtop_t *mtop,
 +                     int start,int homenr,t_commrec *cr,
 +                     rvec x[],rvec xprime[],matrix box)
 +{
 +  char buf[256],buf2[22];
 + 
 +  char *env=getenv("GMX_SUPPRESS_DUMP");
 +  if (env)
 +      return; 
 +  
 +  sprintf(buf,"step%sb",gmx_step_str(step,buf2));
 +  write_constr_pdb(buf,"initial coordinates",
 +                 mtop,start,homenr,cr,x,box);
 +  sprintf(buf,"step%sc",gmx_step_str(step,buf2));
 +  write_constr_pdb(buf,"coordinates after constraining",
 +                 mtop,start,homenr,cr,xprime,box);
 +  if (fplog)
 +  {
 +      fprintf(fplog,"Wrote pdb files with previous and current coordinates\n");
 +  }
 +  fprintf(stderr,"Wrote pdb files with previous and current coordinates\n");
 +}
 +
 +static void pr_sortblock(FILE *fp,const char *title,int nsb,t_sortblock sb[])
 +{
 +  int i;
 +  
 +  fprintf(fp,"%s\n",title);
 +  for(i=0; (i<nsb); i++)
 +    fprintf(fp,"i: %5d, iatom: (%5d %5d %5d), blocknr: %5d\n",
 +          i,sb[i].iatom[0],sb[i].iatom[1],sb[i].iatom[2],
 +          sb[i].blocknr);
 +}
 +
 +gmx_bool constrain(FILE *fplog,gmx_bool bLog,gmx_bool bEner,
 +                   struct gmx_constr *constr,
 +                   t_idef *idef,t_inputrec *ir,gmx_ekindata_t *ekind,
 +                   t_commrec *cr,
 +                   gmx_large_int_t step,int delta_step,
 +                   t_mdatoms *md,
 +                   rvec *x,rvec *xprime,rvec *min_proj,
 +                   gmx_bool bMolPBC,matrix box,
 +                   real lambda,real *dvdlambda,
 +                   rvec *v,tensor *vir,
 +                   t_nrnb *nrnb,int econq,gmx_bool bPscal,
 +                   real veta, real vetanew)
 +{
 +    gmx_bool    bOK,bDump;
 +    int     start,homenr,nrend;
 +    int     i,j,d;
 +    int     ncons,settle_error;
 +    tensor  vir_r_m_dr;
 +    rvec    *vstor;
 +    real    invdt,vir_fac,t;
 +    t_ilist *settle;
 +    int     nsettle;
 +    t_pbc   pbc,*pbc_null;
 +    char    buf[22];
 +    t_vetavars vetavar;
 +    int     nth,th;
 +
 +    if (econq == econqForceDispl && !EI_ENERGY_MINIMIZATION(ir->eI))
 +    {
 +        gmx_incons("constrain called for forces displacements while not doing energy minimization, can not do this while the LINCS and SETTLE constraint connection matrices are mass weighted");
 +    }
 +    
 +    bOK   = TRUE;
 +    bDump = FALSE;
 +    
 +    start  = md->start;
 +    homenr = md->homenr;
 +    nrend = start+homenr;
 +
 +    /* set constants for pressure control integration */ 
 +    init_vetavars(&vetavar,econq!=econqCoord,
 +                  veta,vetanew,ir,ekind,bPscal);
 +
 +    if (ir->delta_t == 0)
 +    {
 +        invdt = 0;
 +    }
 +    else
 +    {
 +        invdt  = 1/ir->delta_t;
 +    }
 +
 +    if (ir->efep != efepNO && EI_DYNAMICS(ir->eI))
 +    {
 +        /* Set the constraint lengths for the step at which this configuration
 +         * is meant to be. The invmasses should not be changed.
 +         */
 +        lambda += delta_step*ir->fepvals->delta_lambda;
 +    }
 +    
 +    if (vir != NULL)
 +    {
 +        clear_mat(vir_r_m_dr);
 +    }
 +    
 +    where();
 +
 +    settle  = &idef->il[F_SETTLE];
 +    nsettle = settle->nr/(1+NRAL(F_SETTLE));
 +
 +    if (nsettle > 0)
 +    {
 +        nth = gmx_omp_nthreads_get(emntSETTLE);
 +    }
 +    else
 +    {
 +        nth = 1;
 +    }
 +
 +    if (nth > 1 && constr->vir_r_m_dr_th == NULL)
 +    {
 +        snew(constr->vir_r_m_dr_th,nth);
 +        snew(constr->settle_error,nth);
 +    }
 +    
 +    settle_error = -1;
 +
 +    /* We do not need full pbc when constraints do not cross charge groups,
 +     * i.e. when dd->constraint_comm==NULL.
 +     * Note that PBC for constraints is different from PBC for bondeds.
 +     * For constraints there is both forward and backward communication.
 +     */
 +    if (ir->ePBC != epbcNONE &&
 +        (cr->dd || bMolPBC) && !(cr->dd && cr->dd->constraint_comm==NULL))
 +    {
 +        /* With pbc=screw the screw has been changed to a shift
 +         * by the constraint coordinate communication routine,
 +         * so that here we can use normal pbc.
 +         */
 +        pbc_null = set_pbc_dd(&pbc,ir->ePBC,cr->dd,FALSE,box);
 +    }
 +    else
 +    {
 +        pbc_null = NULL;
 +    }
 +
 +    /* Communicate the coordinates required for the non-local constraints
 +     * for LINCS and/or SETTLE.
 +     */
 +    if (cr->dd)
 +    {
 +        dd_move_x_constraints(cr->dd,box,x,xprime);
 +    }
 +      else if (PARTDECOMP(cr))
 +      {
 +              pd_move_x_constraints(cr,x,xprime);
 +      }       
 +
 +    if (constr->lincsd != NULL)
 +    {
 +        bOK = constrain_lincs(fplog,bLog,bEner,ir,step,constr->lincsd,md,cr,
 +                              x,xprime,min_proj,
 +                              box,pbc_null,lambda,dvdlambda,
 +                              invdt,v,vir!=NULL,vir_r_m_dr,
 +                              econq,nrnb,
 +                              constr->maxwarn,&constr->warncount_lincs);
 +        if (!bOK && constr->maxwarn >= 0)
 +        {
 +            if (fplog != NULL)
 +            {
 +                fprintf(fplog,"Constraint error in algorithm %s at step %s\n",
 +                        econstr_names[econtLINCS],gmx_step_str(step,buf));
 +            }
 +            bDump = TRUE;
 +        }
 +    } 
 +    
 +    if (constr->nblocks > 0)
 +    {
 +        switch (econq) {
 +        case (econqCoord):
 +            bOK = bshakef(fplog,constr->shaked,
 +                          homenr,md->invmass,constr->nblocks,constr->sblock,
 +                          idef,ir,x,xprime,nrnb,
 +                          constr->lagr,lambda,dvdlambda,
 +                          invdt,v,vir!=NULL,vir_r_m_dr,
 +                          constr->maxwarn>=0,econq,&vetavar);
 +            break;
 +        case (econqVeloc):
 +            bOK = bshakef(fplog,constr->shaked,
 +                          homenr,md->invmass,constr->nblocks,constr->sblock,
 +                          idef,ir,x,min_proj,nrnb,
 +                          constr->lagr,lambda,dvdlambda,
 +                          invdt,NULL,vir!=NULL,vir_r_m_dr,
 +                          constr->maxwarn>=0,econq,&vetavar);
 +            break;
 +        default:
 +            gmx_fatal(FARGS,"Internal error, SHAKE called for constraining something else than coordinates");
 +            break;
 +        }
 +        
 +        if (!bOK && constr->maxwarn >= 0)
 +        {
 +            if (fplog != NULL)
 +            {
 +                fprintf(fplog,"Constraint error in algorithm %s at step %s\n",
 +                        econstr_names[econtSHAKE],gmx_step_str(step,buf));
 +            }
 +            bDump = TRUE;
 +        }
 +    }
 +    
 +    if (nsettle > 0)
 +    {
 +        int calcvir_atom_end;
 +
 +        if (vir == NULL)
 +        {
 +            calcvir_atom_end = 0;
 +        }
 +        else
 +        {
 +            calcvir_atom_end = md->start + md->homenr;
 +        }
 +
 +        switch (econq)
 +        {
 +        case econqCoord:
 +#pragma omp parallel for num_threads(nth) schedule(static)
 +            for(th=0; th<nth; th++)
 +            {
 +                int start_th,end_th;
 +
 +                if (th > 0)
 +                {
 +                    clear_mat(constr->vir_r_m_dr_th[th]);
 +                }
 +
 +                start_th = (nsettle* th   )/nth;
 +                end_th   = (nsettle*(th+1))/nth;
 +                if (start_th >= 0 && end_th - start_th > 0)
 +                {
 +                    csettle(constr->settled,
 +                            end_th-start_th,
 +                            settle->iatoms+start_th*(1+NRAL(F_SETTLE)),
 +                            pbc_null,
 +                            x[0],xprime[0],
 +                            invdt,v?v[0]:NULL,calcvir_atom_end,
 +                            th == 0 ? vir_r_m_dr : constr->vir_r_m_dr_th[th],
 +                            th == 0 ? &settle_error : &constr->settle_error[th],
 +                            &vetavar);
 +                }
 +            }
 +            inc_nrnb(nrnb,eNR_SETTLE,nsettle);
 +            if (v != NULL)
 +            {
 +                inc_nrnb(nrnb,eNR_CONSTR_V,nsettle*3);
 +            }
 +            if (vir != NULL)
 +            {
 +                inc_nrnb(nrnb,eNR_CONSTR_VIR,nsettle*3);
 +            }
 +            break;
 +        case econqVeloc:
 +        case econqDeriv:
 +        case econqForce:
 +        case econqForceDispl:
 +#pragma omp parallel for num_threads(nth) schedule(static)
 +            for(th=0; th<nth; th++)
 +            {
 +                int start_th,end_th;
 +
 +                if (th > 0)
 +                {
 +                    clear_mat(constr->vir_r_m_dr_th[th]);
 +                }
 +                
 +                start_th = (nsettle* th   )/nth;
 +                end_th   = (nsettle*(th+1))/nth;
 +
 +                if (start_th >= 0 && end_th - start_th > 0)
 +                {
 +                    settle_proj(fplog,constr->settled,econq,
 +                                end_th-start_th,
 +                                settle->iatoms+start_th*(1+NRAL(F_SETTLE)),
 +                                pbc_null,
 +                                x,
 +                                xprime,min_proj,calcvir_atom_end,
 +                                th == 0 ? vir_r_m_dr : constr->vir_r_m_dr_th[th],
 +                                &vetavar);
 +                }
 +            }
 +            /* This is an overestimate */
 +            inc_nrnb(nrnb,eNR_SETTLE,nsettle);
 +            break;
 +        case econqDeriv_FlexCon:
 +            /* Nothing to do, since the are no flexible constraints in settles */
 +            break;
 +        default:
 +            gmx_incons("Unknown constraint quantity for settle");
 +        }
 +    }
 +
 +    if (settle->nr > 0)
 +    {
 +        /* Combine virial and error info of the other threads */
 +        for(i=1; i<nth; i++)
 +        {
 +            m_add(vir_r_m_dr,constr->vir_r_m_dr_th[i],vir_r_m_dr);
 +            settle_error = constr->settle_error[i];
 +        } 
 +
 +        if (econq == econqCoord && settle_error >= 0)
 +        {
 +            bOK = FALSE;
 +            if (constr->maxwarn >= 0)
 +            {
 +                char buf[256];
 +                sprintf(buf,
 +                        "\nstep " gmx_large_int_pfmt ": Water molecule starting at atom %d can not be "
 +                        "settled.\nCheck for bad contacts and/or reduce the timestep if appropriate.\n",
 +                        step,ddglatnr(cr->dd,settle->iatoms[settle_error*(1+NRAL(F_SETTLE))+1]));
 +                if (fplog)
 +                {
 +                    fprintf(fplog,"%s",buf);
 +                }
 +                fprintf(stderr,"%s",buf);
 +                constr->warncount_settle++;
 +                if (constr->warncount_settle > constr->maxwarn)
 +                {
 +                    too_many_constraint_warnings(-1,constr->warncount_settle);
 +                }
 +                bDump = TRUE;
 +            }
 +        }
 +    }
 +        
 +    free_vetavars(&vetavar);
 +    
 +    if (vir != NULL)
 +    {
 +        switch (econq)
 +        {
 +        case econqCoord:
 +            vir_fac = 0.5/(ir->delta_t*ir->delta_t);
 +            break;
 +        case econqVeloc:
 +            vir_fac = 0.5/ir->delta_t;
 +            break;
 +        case econqForce:
 +        case econqForceDispl:
 +            vir_fac = 0.5;
 +            break;
 +        default:
 +            vir_fac = 0;
 +            gmx_incons("Unsupported constraint quantity for virial");
 +        }
 +        
 +        if (EI_VV(ir->eI))
 +        {
 +            vir_fac *= 2;  /* only constraining over half the distance here */
 +        }
 +        for(i=0; i<DIM; i++)
 +        {
 +            for(j=0; j<DIM; j++)
 +            {
 +                (*vir)[i][j] = vir_fac*vir_r_m_dr[i][j];
 +            }
 +        }
 +    }
 +    
 +    if (bDump)
 +    {
 +        dump_confs(fplog,step,constr->warn_mtop,start,homenr,cr,x,xprime,box);
 +    }
 +    
 +    if (econq == econqCoord)
 +    {
 +        if (ir->ePull == epullCONSTRAINT)
 +        {
 +            if (EI_DYNAMICS(ir->eI))
 +            {
 +                t = ir->init_t + (step + delta_step)*ir->delta_t;
 +            }
 +            else
 +            {
 +                t = ir->init_t;
 +            }
 +            set_pbc(&pbc,ir->ePBC,box);
 +            pull_constraint(ir->pull,md,&pbc,cr,ir->delta_t,t,x,xprime,v,*vir);
 +        }
 +        if (constr->ed && delta_step > 0)
 +        {
 +            /* apply the essential dynamcs constraints here */
-     if (ed != NULL) 
++            do_edsam(ir,step,cr,xprime,v,box,constr->ed);
 +        }
 +    }
 +    
 +    return bOK;
 +}
 +
 +real *constr_rmsd_data(struct gmx_constr *constr)
 +{
 +  if (constr->lincsd)
 +    return lincs_rmsd_data(constr->lincsd);
 +  else
 +    return NULL;
 +}
 +
 +real constr_rmsd(struct gmx_constr *constr,gmx_bool bSD2)
 +{
 +  if (constr->lincsd)
 +    return lincs_rmsd(constr->lincsd,bSD2);
 +  else
 +    return 0;
 +}
 +
 +static void make_shake_sblock_pd(struct gmx_constr *constr,
 +                               t_idef *idef,t_mdatoms *md)
 +{
 +  int  i,j,m,ncons;
 +  int  bstart,bnr;
 +  t_blocka    sblocks;
 +  t_sortblock *sb;
 +  t_iatom     *iatom;
 +  atom_id     *inv_sblock;
 +
 +  /* Since we are processing the local topology,
 +   * the F_CONSTRNC ilist has been concatenated to the F_CONSTR ilist.
 +   */
 +  ncons = idef->il[F_CONSTR].nr/3;
 +
 +  init_blocka(&sblocks);
 +  gen_sblocks(NULL,md->start,md->start+md->homenr,idef,&sblocks,FALSE);
 +  
 +  /*
 +    bstart=(idef->nodeid > 0) ? blocks->multinr[idef->nodeid-1] : 0;
 +    nblocks=blocks->multinr[idef->nodeid] - bstart;
 +  */
 +  bstart  = 0;
 +  constr->nblocks = sblocks.nr;
 +  if (debug) 
 +    fprintf(debug,"ncons: %d, bstart: %d, nblocks: %d\n",
 +          ncons,bstart,constr->nblocks);
 +  
 +  /* Calculate block number for each atom */
 +  inv_sblock = make_invblocka(&sblocks,md->nr);
 +  
 +  done_blocka(&sblocks);
 +  
 +  /* Store the block number in temp array and
 +   * sort the constraints in order of the sblock number 
 +   * and the atom numbers, really sorting a segment of the array!
 +   */
 +#ifdef DEBUGIDEF 
 +  pr_idef(fplog,0,"Before Sort",idef);
 +#endif
 +  iatom=idef->il[F_CONSTR].iatoms;
 +  snew(sb,ncons);
 +  for(i=0; (i<ncons); i++,iatom+=3) {
 +    for(m=0; (m<3); m++)
 +      sb[i].iatom[m] = iatom[m];
 +    sb[i].blocknr = inv_sblock[iatom[1]];
 +  }
 +  
 +  /* Now sort the blocks */
 +  if (debug) {
 +    pr_sortblock(debug,"Before sorting",ncons,sb);
 +    fprintf(debug,"Going to sort constraints\n");
 +  }
 +  
 +  qsort(sb,ncons,(size_t)sizeof(*sb),pcomp);
 +  
 +  if (debug) {
 +    pr_sortblock(debug,"After sorting",ncons,sb);
 +  }
 +  
 +  iatom=idef->il[F_CONSTR].iatoms;
 +  for(i=0; (i<ncons); i++,iatom+=3) 
 +    for(m=0; (m<3); m++)
 +      iatom[m]=sb[i].iatom[m];
 +#ifdef DEBUGIDEF
 +  pr_idef(fplog,0,"After Sort",idef);
 +#endif
 +  
 +  j=0;
 +  snew(constr->sblock,constr->nblocks+1);
 +  bnr=-2;
 +  for(i=0; (i<ncons); i++) {
 +    if (sb[i].blocknr != bnr) {
 +      bnr=sb[i].blocknr;
 +      constr->sblock[j++]=3*i;
 +    }
 +  }
 +  /* Last block... */
 +  constr->sblock[j++] = 3*ncons;
 +  
 +  if (j != (constr->nblocks+1)) {
 +    fprintf(stderr,"bstart: %d\n",bstart);
 +    fprintf(stderr,"j: %d, nblocks: %d, ncons: %d\n",
 +          j,constr->nblocks,ncons);
 +    for(i=0; (i<ncons); i++)
 +      fprintf(stderr,"i: %5d  sb[i].blocknr: %5u\n",i,sb[i].blocknr);
 +    for(j=0; (j<=constr->nblocks); j++)
 +      fprintf(stderr,"sblock[%3d]=%5d\n",j,(int)constr->sblock[j]);
 +    gmx_fatal(FARGS,"DEATH HORROR: "
 +            "sblocks does not match idef->il[F_CONSTR]");
 +  }
 +  sfree(sb);
 +  sfree(inv_sblock);
 +}
 +
 +static void make_shake_sblock_dd(struct gmx_constr *constr,
 +                               t_ilist *ilcon,t_block *cgs,
 +                               gmx_domdec_t *dd)
 +{
 +  int ncons,c,cg;
 +  t_iatom *iatom;
 +
 +  if (dd->ncg_home+1 > constr->sblock_nalloc) {
 +    constr->sblock_nalloc = over_alloc_dd(dd->ncg_home+1);
 +    srenew(constr->sblock,constr->sblock_nalloc);
 +  }
 +  
 +  ncons = ilcon->nr/3;
 +  iatom = ilcon->iatoms;
 +  constr->nblocks = 0;
 +  cg = 0;
 +  for(c=0; c<ncons; c++) {
 +    if (c == 0 || iatom[1] >= cgs->index[cg+1]) {
 +      constr->sblock[constr->nblocks++] = 3*c;
 +      while (iatom[1] >= cgs->index[cg+1])
 +      cg++;
 +    }
 +    iatom += 3;
 +  }
 +  constr->sblock[constr->nblocks] = 3*ncons;
 +}
 +
 +t_blocka make_at2con(int start,int natoms,
 +                   t_ilist *ilist,t_iparams *iparams,
 +                   gmx_bool bDynamics,int *nflexiblecons)
 +{
 +  int *count,ncon,con,con_tot,nflexcon,ftype,i,a;
 +  t_iatom  *ia;
 +  t_blocka at2con;
 +  gmx_bool bFlexCon;
 +  
 +  snew(count,natoms);
 +  nflexcon = 0;
 +  for(ftype=F_CONSTR; ftype<=F_CONSTRNC; ftype++) {
 +    ncon = ilist[ftype].nr/3;
 +    ia   = ilist[ftype].iatoms;
 +    for(con=0; con<ncon; con++) {
 +      bFlexCon = (iparams[ia[0]].constr.dA == 0 &&
 +                iparams[ia[0]].constr.dB == 0);
 +      if (bFlexCon)
 +      nflexcon++;
 +      if (bDynamics || !bFlexCon) {
 +      for(i=1; i<3; i++) {
 +        a = ia[i] - start;
 +        count[a]++;
 +      }
 +      }
 +      ia += 3;
 +    }
 +  }
 +  *nflexiblecons = nflexcon;
 +
 +  at2con.nr = natoms;
 +  at2con.nalloc_index = at2con.nr+1;
 +  snew(at2con.index,at2con.nalloc_index);
 +  at2con.index[0] = 0;
 +  for(a=0; a<natoms; a++) {
 +    at2con.index[a+1] = at2con.index[a] + count[a];
 +    count[a] = 0;
 +  }
 +  at2con.nra = at2con.index[natoms];
 +  at2con.nalloc_a = at2con.nra;
 +  snew(at2con.a,at2con.nalloc_a);
 +
 +  /* The F_CONSTRNC constraints have constraint numbers
 +   * that continue after the last F_CONSTR constraint.
 +   */
 +  con_tot = 0;
 +  for(ftype=F_CONSTR; ftype<=F_CONSTRNC; ftype++) {
 +    ncon = ilist[ftype].nr/3;
 +    ia   = ilist[ftype].iatoms;
 +    for(con=0; con<ncon; con++) {
 +      bFlexCon = (iparams[ia[0]].constr.dA == 0 &&
 +                iparams[ia[0]].constr.dB == 0);
 +      if (bDynamics || !bFlexCon) {
 +      for(i=1; i<3; i++) {
 +        a = ia[i] - start;
 +        at2con.a[at2con.index[a]+count[a]++] = con_tot;
 +      }
 +      }
 +      con_tot++;
 +      ia += 3;
 +    }
 +  }
 +  
 +  sfree(count);
 +
 +  return at2con;
 +}
 +
 +static int *make_at2settle(int natoms,const t_ilist *ilist)
 +{
 +    int *at2s;
 +    int a,stride,s;
 +  
 +    snew(at2s,natoms);
 +    /* Set all to no settle */
 +    for(a=0; a<natoms; a++)
 +    {
 +        at2s[a] = -1;
 +    }
 +
 +    stride = 1 + NRAL(F_SETTLE);
 +
 +    for(s=0; s<ilist->nr; s+=stride)
 +    {
 +        at2s[ilist->iatoms[s+1]] = s/stride;
 +        at2s[ilist->iatoms[s+2]] = s/stride;
 +        at2s[ilist->iatoms[s+3]] = s/stride;
 +    }
 +
 +    return at2s;
 +}
 +
 +void set_constraints(struct gmx_constr *constr,
 +                     gmx_localtop_t *top,t_inputrec *ir,
 +                     t_mdatoms *md,t_commrec *cr)
 +{
 +    t_idef *idef;
 +    int    ncons;
 +    t_ilist *settle;
 +    int    iO,iH;
 +    
 +    idef = &top->idef;
 +       
 +    if (constr->ncon_tot > 0)
 +    {
 +        /* We are using the local topology,
 +         * so there are only F_CONSTR constraints.
 +         */
 +        ncons = idef->il[F_CONSTR].nr/3;
 +        
 +        /* With DD we might also need to call LINCS with ncons=0 for
 +         * communicating coordinates to other nodes that do have constraints.
 +         */
 +        if (ir->eConstrAlg == econtLINCS)
 +        {
 +            set_lincs(idef,md,EI_DYNAMICS(ir->eI),cr,constr->lincsd);
 +        }
 +        if (ir->eConstrAlg == econtSHAKE)
 +        {
 +            if (cr->dd)
 +            {
 +                make_shake_sblock_dd(constr,&idef->il[F_CONSTR],&top->cgs,cr->dd);
 +            }
 +            else
 +            {
 +                make_shake_sblock_pd(constr,idef,md);
 +            }
 +            if (ncons > constr->lagr_nalloc)
 +            {
 +                constr->lagr_nalloc = over_alloc_dd(ncons);
 +                srenew(constr->lagr,constr->lagr_nalloc);
 +            }
 +        }
 +    }
 +
 +    if (idef->il[F_SETTLE].nr > 0 && constr->settled == NULL)
 +    {
 +        settle = &idef->il[F_SETTLE];
 +        iO = settle->iatoms[1];
 +        iH = settle->iatoms[2];
 +        constr->settled =
 +            settle_init(md->massT[iO],md->massT[iH],
 +                        md->invmass[iO],md->invmass[iH],
 +                        idef->iparams[settle->iatoms[0]].settle.doh,
 +                        idef->iparams[settle->iatoms[0]].settle.dhh);
 +    }
 +    
 +    /* Make a selection of the local atoms for essential dynamics */
 +    if (constr->ed && cr->dd)
 +    {
 +        dd_make_local_ed_indices(cr->dd,constr->ed);
 +    }
 +}
 +
 +static void constr_recur(t_blocka *at2con,
 +                       t_ilist *ilist,t_iparams *iparams,gmx_bool bTopB,
 +                       int at,int depth,int nc,int *path,
 +                       real r0,real r1,real *r2max,
 +                       int *count)
 +{
 +  int  ncon1;
 +  t_iatom *ia1,*ia2;
 +  int  c,con,a1;
 +  gmx_bool bUse;
 +  t_iatom *ia;
 +  real len,rn0,rn1;
 +
 +  (*count)++;
 +
 +  ncon1 = ilist[F_CONSTR].nr/3;
 +  ia1   = ilist[F_CONSTR].iatoms;
 +  ia2   = ilist[F_CONSTRNC].iatoms;
 +
 +  /* Loop over all constraints connected to this atom */
 +  for(c=at2con->index[at]; c<at2con->index[at+1]; c++) {
 +    con = at2con->a[c];
 +    /* Do not walk over already used constraints */
 +    bUse = TRUE;
 +    for(a1=0; a1<depth; a1++) {
 +      if (con == path[a1])
 +      bUse = FALSE;
 +    }
 +    if (bUse) {
 +      ia = constr_iatomptr(ncon1,ia1,ia2,con);
 +      /* Flexible constraints currently have length 0, which is incorrect */
 +      if (!bTopB)
 +      len = iparams[ia[0]].constr.dA;
 +      else
 +      len = iparams[ia[0]].constr.dB;
 +      /* In the worst case the bond directions alternate */
 +      if (nc % 2 == 0) {
 +      rn0 = r0 + len;
 +      rn1 = r1;
 +      } else {
 +      rn0 = r0;
 +      rn1 = r1 + len;
 +      }
 +      /* Assume angles of 120 degrees between all bonds */
 +      if (rn0*rn0 + rn1*rn1 + rn0*rn1 > *r2max) {
 +      *r2max = rn0*rn0 + rn1*rn1 + r0*rn1;
 +      if (debug) {
 +        fprintf(debug,"Found longer constraint distance: r0 %5.3f r1 %5.3f rmax %5.3f\n", rn0,rn1,sqrt(*r2max));
 +        for(a1=0; a1<depth; a1++)
 +          fprintf(debug," %d %5.3f",
 +                  path[a1],
 +                  iparams[constr_iatomptr(ncon1,ia1,ia2,con)[0]].constr.dA);
 +        fprintf(debug," %d %5.3f\n",con,len);
 +      }
 +      }
 +      /* Limit the number of recursions to 1000*nc,
 +       * so a call does not take more than a second,
 +       * even for highly connected systems.
 +       */
 +      if (depth + 1 < nc && *count < 1000*nc) {
 +      if (ia[1] == at)
 +        a1 = ia[2];
 +      else
 +        a1 = ia[1];
 +      /* Recursion */
 +      path[depth] = con;
 +      constr_recur(at2con,ilist,iparams,
 +                   bTopB,a1,depth+1,nc,path,rn0,rn1,r2max,count);
 +      path[depth] = -1;
 +      }
 +    }
 +  }
 +}
 +
 +static real constr_r_max_moltype(FILE *fplog,
 +                               gmx_moltype_t *molt,t_iparams *iparams,
 +                               t_inputrec *ir)
 +{
 +  int natoms,nflexcon,*path,at,count;
 +
 +  t_blocka at2con;
 +  real r0,r1,r2maxA,r2maxB,rmax,lam0,lam1;
 +
 +  if (molt->ilist[F_CONSTR].nr   == 0 &&
 +      molt->ilist[F_CONSTRNC].nr == 0) {
 +    return 0;
 +  }
 +  
 +  natoms = molt->atoms.nr;
 +
 +  at2con = make_at2con(0,natoms,molt->ilist,iparams,
 +                     EI_DYNAMICS(ir->eI),&nflexcon);
 +  snew(path,1+ir->nProjOrder);
 +  for(at=0; at<1+ir->nProjOrder; at++)
 +    path[at] = -1;
 +
 +  r2maxA = 0;
 +  for(at=0; at<natoms; at++) {
 +    r0 = 0;
 +    r1 = 0;
 +
 +    count = 0;
 +    constr_recur(&at2con,molt->ilist,iparams,
 +               FALSE,at,0,1+ir->nProjOrder,path,r0,r1,&r2maxA,&count);
 +  }
 +  if (ir->efep == efepNO) {
 +    rmax = sqrt(r2maxA);
 +  } else {
 +    r2maxB = 0;
 +    for(at=0; at<natoms; at++) {
 +      r0 = 0;
 +      r1 = 0;
 +      count = 0;
 +      constr_recur(&at2con,molt->ilist,iparams,
 +                 TRUE,at,0,1+ir->nProjOrder,path,r0,r1,&r2maxB,&count);
 +    }
 +    lam0 = ir->fepvals->init_lambda;
 +    if (EI_DYNAMICS(ir->eI))
 +      lam0 += ir->init_step*ir->fepvals->delta_lambda;
 +    rmax = (1 - lam0)*sqrt(r2maxA) + lam0*sqrt(r2maxB);
 +    if (EI_DYNAMICS(ir->eI)) {
 +      lam1 = ir->fepvals->init_lambda + (ir->init_step + ir->nsteps)*ir->fepvals->delta_lambda;
 +      rmax = max(rmax,(1 - lam1)*sqrt(r2maxA) + lam1*sqrt(r2maxB));
 +    }
 +  }
 +
 +  done_blocka(&at2con);
 +  sfree(path);
 +
 +  return rmax;
 +}
 +
 +real constr_r_max(FILE *fplog,gmx_mtop_t *mtop,t_inputrec *ir)
 +{
 +  int mt;
 +  real rmax;
 +
 +  rmax = 0;
 +  for(mt=0; mt<mtop->nmoltype; mt++) {
 +    rmax = max(rmax,
 +             constr_r_max_moltype(fplog,&mtop->moltype[mt],
 +                                  mtop->ffparams.iparams,ir));
 +  }
 +  
 +  if (fplog)
 +    fprintf(fplog,"Maximum distance for %d constraints, at 120 deg. angles, all-trans: %.3f nm\n",1+ir->nProjOrder,rmax);
 +
 +  return rmax;
 +}
 +
 +gmx_constr_t init_constraints(FILE *fplog,
 +                              gmx_mtop_t *mtop,t_inputrec *ir,
 +                              gmx_edsam_t ed,t_state *state,
 +                              t_commrec *cr)
 +{
 +    int  ncon,nset,nmol,settle_type,i,natoms,mt,nflexcon;
 +    struct gmx_constr *constr;
 +    char *env;
 +    t_ilist *ilist;
 +    gmx_mtop_ilistloop_t iloop;
 +    
 +    ncon =
 +        gmx_mtop_ftype_count(mtop,F_CONSTR) +
 +        gmx_mtop_ftype_count(mtop,F_CONSTRNC);
 +    nset = gmx_mtop_ftype_count(mtop,F_SETTLE);
 +    
 +    if (ncon+nset == 0 && ir->ePull != epullCONSTRAINT && ed == NULL) 
 +    {
 +        return NULL;
 +    }
 +    
 +    snew(constr,1);
 +    
 +    constr->ncon_tot = ncon;
 +    constr->nflexcon = 0;
 +    if (ncon > 0) 
 +    {
 +        constr->n_at2con_mt = mtop->nmoltype;
 +        snew(constr->at2con_mt,constr->n_at2con_mt);
 +        for(mt=0; mt<mtop->nmoltype; mt++) 
 +        {
 +            constr->at2con_mt[mt] = make_at2con(0,mtop->moltype[mt].atoms.nr,
 +                                                mtop->moltype[mt].ilist,
 +                                                mtop->ffparams.iparams,
 +                                                EI_DYNAMICS(ir->eI),&nflexcon);
 +            for(i=0; i<mtop->nmolblock; i++) 
 +            {
 +                if (mtop->molblock[i].type == mt) 
 +                {
 +                    constr->nflexcon += mtop->molblock[i].nmol*nflexcon;
 +                }
 +            }
 +        }
 +        
 +        if (constr->nflexcon > 0) 
 +        {
 +            if (fplog) 
 +            {
 +                fprintf(fplog,"There are %d flexible constraints\n",
 +                        constr->nflexcon);
 +                if (ir->fc_stepsize == 0) 
 +                {
 +                    fprintf(fplog,"\n"
 +                            "WARNING: step size for flexible constraining = 0\n"
 +                            "         All flexible constraints will be rigid.\n"
 +                            "         Will try to keep all flexible constraints at their original length,\n"
 +                            "         but the lengths may exhibit some drift.\n\n");
 +                    constr->nflexcon = 0;
 +                }
 +            }
 +            if (constr->nflexcon > 0) 
 +            {
 +                please_cite(fplog,"Hess2002");
 +            }
 +        }
 +        
 +        if (ir->eConstrAlg == econtLINCS) 
 +        {
 +            constr->lincsd = init_lincs(fplog,mtop,
 +                                        constr->nflexcon,constr->at2con_mt,
 +                                        DOMAINDECOMP(cr) && cr->dd->bInterCGcons,
 +                                        ir->nLincsIter,ir->nProjOrder);
 +        }
 +        
 +        if (ir->eConstrAlg == econtSHAKE) {
 +            if (DOMAINDECOMP(cr) && cr->dd->bInterCGcons)
 +            {
 +                gmx_fatal(FARGS,"SHAKE is not supported with domain decomposition and constraint that cross charge group boundaries, use LINCS");
 +            }
 +            if (constr->nflexcon) 
 +            {
 +                gmx_fatal(FARGS,"For this system also velocities and/or forces need to be constrained, this can not be done with SHAKE, you should select LINCS");
 +            }
 +            please_cite(fplog,"Ryckaert77a");
 +            if (ir->bShakeSOR) 
 +            {
 +                please_cite(fplog,"Barth95a");
 +            }
 +
 +            constr->shaked = shake_init();
 +        }
 +    }
 +  
 +    if (nset > 0) {
 +        please_cite(fplog,"Miyamoto92a");
 +
 +        constr->bInterCGsettles = inter_charge_group_settles(mtop);
 +
 +        /* Check that we have only one settle type */
 +        settle_type = -1;
 +        iloop = gmx_mtop_ilistloop_init(mtop);
 +        while (gmx_mtop_ilistloop_next(iloop,&ilist,&nmol)) 
 +        {
 +            for (i=0; i<ilist[F_SETTLE].nr; i+=4) 
 +            {
 +                if (settle_type == -1) 
 +                {
 +                    settle_type = ilist[F_SETTLE].iatoms[i];
 +                } 
 +                else if (ilist[F_SETTLE].iatoms[i] != settle_type) 
 +                {
 +                    gmx_fatal(FARGS,
 +                              "The [molecules] section of your topology specifies more than one block of\n"
 +                              "a [moleculetype] with a [settles] block. Only one such is allowed. If you\n"
 +                              "are trying to partition your solvent into different *groups* (e.g. for\n"
 +                              "freezing, T-coupling, etc.) then you are using the wrong approach. Index\n"
 +                              "files specify groups. Otherwise, you may wish to change the least-used\n"
 +                              "block of molecules with SETTLE constraints into 3 normal constraints.");
 +                }
 +            }
 +        }
 +
 +        constr->n_at2settle_mt = mtop->nmoltype;
 +        snew(constr->at2settle_mt,constr->n_at2settle_mt);
 +        for(mt=0; mt<mtop->nmoltype; mt++) 
 +        {
 +            constr->at2settle_mt[mt] =
 +                make_at2settle(mtop->moltype[mt].atoms.nr,
 +                               &mtop->moltype[mt].ilist[F_SETTLE]);
 +        }
 +    }
 +    
 +    constr->maxwarn = 999;
 +    env = getenv("GMX_MAXCONSTRWARN");
 +    if (env) 
 +    {
 +        constr->maxwarn = 0;
 +        sscanf(env,"%d",&constr->maxwarn);
 +        if (fplog) 
 +        {
 +            fprintf(fplog,
 +                    "Setting the maximum number of constraint warnings to %d\n",
 +                    constr->maxwarn);
 +        }
 +        if (MASTER(cr)) 
 +        {
 +            fprintf(stderr,
 +                    "Setting the maximum number of constraint warnings to %d\n",
 +                    constr->maxwarn);
 +        }
 +    }
 +    if (constr->maxwarn < 0 && fplog) 
 +    {
 +        fprintf(fplog,"maxwarn < 0, will not stop on constraint errors\n");
 +    }
 +    constr->warncount_lincs  = 0;
 +    constr->warncount_settle = 0;
 +    
 +    /* Initialize the essential dynamics sampling.
 +     * Put the pointer to the ED struct in constr */
 +    constr->ed = ed;
-         init_edsam(mtop,ir,cr,ed,state->x,state->box);
++    if (ed != NULL || state->edsamstate.nED > 0)
 +    {
++        init_edsam(mtop,ir,cr,ed,state->x,state->box,&state->edsamstate);
 +    }
 +    
 +    constr->warn_mtop = mtop;
 +    
 +    return constr;
 +}
 +
 +const t_blocka *atom2constraints_moltype(gmx_constr_t constr)
 +{
 +  return constr->at2con_mt;
 +}
 +
 +const int **atom2settle_moltype(gmx_constr_t constr)
 +{
 +    return (const int **)constr->at2settle_mt;
 +}
 +
 +
 +gmx_bool inter_charge_group_constraints(const gmx_mtop_t *mtop)
 +{
 +    const gmx_moltype_t *molt;
 +    const t_block *cgs;
 +    const t_ilist *il;
 +    int  mb;
 +    int  nat,*at2cg,cg,a,ftype,i;
 +    gmx_bool bInterCG;
 +
 +    bInterCG = FALSE;
 +    for(mb=0; mb<mtop->nmolblock && !bInterCG; mb++)
 +    {
 +        molt = &mtop->moltype[mtop->molblock[mb].type];
 +
 +        if (molt->ilist[F_CONSTR].nr   > 0 ||
 +            molt->ilist[F_CONSTRNC].nr > 0 ||
 +            molt->ilist[F_SETTLE].nr > 0)
 +        {
 +            cgs  = &molt->cgs;
 +            snew(at2cg,molt->atoms.nr);
 +            for(cg=0; cg<cgs->nr; cg++)
 +            {
 +                for(a=cgs->index[cg]; a<cgs->index[cg+1]; a++)
 +                    at2cg[a] = cg;
 +            }
 +
 +            for(ftype=F_CONSTR; ftype<=F_CONSTRNC; ftype++)
 +            {
 +                il = &molt->ilist[ftype];
 +                for(i=0; i<il->nr && !bInterCG; i+=1+NRAL(ftype))
 +                {
 +                    if (at2cg[il->iatoms[i+1]] != at2cg[il->iatoms[i+2]])
 +                    {
 +                        bInterCG = TRUE;
 +                    }
 +                }
 +            }
 +            
 +            sfree(at2cg);
 +        }
 +    }
 +
 +    return bInterCG;
 +}
 +
 +gmx_bool inter_charge_group_settles(const gmx_mtop_t *mtop)
 +{
 +    const gmx_moltype_t *molt;
 +    const t_block *cgs;
 +    const t_ilist *il;
 +    int  mb;
 +    int  nat,*at2cg,cg,a,ftype,i;
 +    gmx_bool bInterCG;
 +
 +    bInterCG = FALSE;
 +    for(mb=0; mb<mtop->nmolblock && !bInterCG; mb++)
 +    {
 +        molt = &mtop->moltype[mtop->molblock[mb].type];
 +
 +        if (molt->ilist[F_SETTLE].nr > 0)
 +        {
 +            cgs  = &molt->cgs;
 +            snew(at2cg,molt->atoms.nr);
 +            for(cg=0; cg<cgs->nr; cg++)
 +            {
 +                for(a=cgs->index[cg]; a<cgs->index[cg+1]; a++)
 +                    at2cg[a] = cg;
 +            }
 +
 +            for(ftype=F_SETTLE; ftype<=F_SETTLE; ftype++)
 +            {
 +                il = &molt->ilist[ftype];
 +                for(i=0; i<il->nr && !bInterCG; i+=1+NRAL(F_SETTLE))
 +                {
 +                    if (at2cg[il->iatoms[i+1]] != at2cg[il->iatoms[i+2]] ||
 +                        at2cg[il->iatoms[i+1]] != at2cg[il->iatoms[i+3]])
 +                    {
 +                        bInterCG = TRUE;
 +                    }
 +                }
 +            }       
 +            
 +            sfree(at2cg);
 +        }
 +    }
 +
 +    return bInterCG;
 +}
 +
 +/* helper functions for andersen temperature control, because the
 + * gmx_constr construct is only defined in constr.c. Return the list
 + * of blocks (get_sblock) and the number of blocks (get_nblocks).  */
 +
 +extern int *get_sblock(struct gmx_constr *constr)
 +{
 +    return constr->sblock;
 +}
 +
 +extern int get_nblocks(struct gmx_constr *constr)
 +{
 +    return constr->nblocks;
 +}
index 19a7e61364c9b4ea4dc5e6e63e940168592f220d,0000000000000000000000000000000000000000..b0fe96ea04934c74952588f8ffdcd4dae964295e
mode 100644,000000..100644
--- /dev/null
@@@ -1,1670 -1,0 +1,1670 @@@
-                          gmx_ekindata_t *ekind, tensor vir, real pcorr, real ecorr, t_extmass *MassQ)
 +/* -*- mode: c; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4; c-file-style: "stroustrup"; -*-
 + * 
 + *                This source code is part of
 + * 
 + *                 G   R   O   M   A   C   S
 + * 
 + *          GROningen MAchine for Chemical Simulations
 + * 
 + *                        VERSION 3.2.0
 + * Written by David van der Spoel, Erik Lindahl, Berk Hess, and others.
 + * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 + * Copyright (c) 2001-2004, The GROMACS development team,
 + * check out http://www.gromacs.org for more information.
 +
 + * This program is free software; you can redistribute it and/or
 + * modify it under the terms of the GNU General Public License
 + * as published by the Free Software Foundation; either version 2
 + * of the License, or (at your option) any later version.
 + * 
 + * If you want to redistribute modifications, please consider that
 + * scientific software is very special. Version control is crucial -
 + * bugs must be traceable. We will be happy to consider code for
 + * inclusion in the official distribution, but derived work must not
 + * be called official GROMACS. Details are found in the README & COPYING
 + * files - if they are missing, get the official version at www.gromacs.org.
 + * 
 + * To help us fund GROMACS development, we humbly ask that you cite
 + * the papers on the package - you can find them in the top README file.
 + * 
 + * For more info, check our website at http://www.gromacs.org
 + * 
 + * And Hey:
 + * GROwing Monsters And Cloning Shrimps
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +#include <assert.h>
 +
 +#include "typedefs.h"
 +#include "smalloc.h"
 +#include "update.h"
 +#include "vec.h"
 +#include "macros.h"
 +#include "physics.h"
 +#include "names.h"
 +#include "gmx_fatal.h"
 +#include "txtdump.h"
 +#include "nrnb.h"
 +#include "gmx_random.h"
 +#include "update.h"
 +#include "mdrun.h"
 +
 +#define NTROTTERPARTS 3
 +
 +/* Suzuki-Yoshida Constants, for n=3 and n=5, for symplectic integration  */
 +/* for n=1, w0 = 1 */
 +/* for n=3, w0 = w2 = 1/(2-2^-(1/3)), w1 = 1-2*w0 */
 +/* for n=5, w0 = w1 = w3 = w4 = 1/(4-4^-(1/3)), w1 = 1-4*w0 */
 +
 +#define MAX_SUZUKI_YOSHIDA_NUM 5
 +#define SUZUKI_YOSHIDA_NUM  5
 +
 +static const double sy_const_1[] = { 1. };
 +static const double sy_const_3[] = { 0.828981543588751,-0.657963087177502,0.828981543588751 };
 +static const double sy_const_5[] = { 0.2967324292201065,0.2967324292201065,-0.186929716880426,0.2967324292201065,0.2967324292201065 };
 +
 +static const double* sy_const[] = {
 +    NULL,
 +    sy_const_1,
 +    NULL,
 +    sy_const_3,
 +    NULL,
 +    sy_const_5
 +};
 +
 +/*
 +static const double sy_const[MAX_SUZUKI_YOSHIDA_NUM+1][MAX_SUZUKI_YOSHIDA_NUM+1] = {
 +    {},
 +    {1},
 +    {},
 +    {0.828981543588751,-0.657963087177502,0.828981543588751},
 +    {},
 +    {0.2967324292201065,0.2967324292201065,-0.186929716880426,0.2967324292201065,0.2967324292201065}
 +};*/
 +
 +/* these integration routines are only referenced inside this file */
 +static void NHC_trotter(t_grpopts *opts,int nvar, gmx_ekindata_t *ekind,real dtfull,
 +                        double xi[],double vxi[], double scalefac[], real *veta, t_extmass *MassQ, gmx_bool bEkinAveVel)
 +
 +{
 +    /* general routine for both barostat and thermostat nose hoover chains */
 +
 +    int   i,j,mi,mj,jmax;
 +    double Ekin,Efac,reft,kT,nd;
 +    double dt;
 +    t_grp_tcstat *tcstat;
 +    double *ivxi,*ixi;
 +    double *iQinv;
 +    double *GQ;
 +    gmx_bool bBarostat;
 +    int mstepsi, mstepsj;
 +    int ns = SUZUKI_YOSHIDA_NUM;  /* set the degree of integration in the types/state.h file */
 +    int nh = opts->nhchainlength;
 +    
 +    snew(GQ,nh);
 +    mstepsi = mstepsj = ns;
 +
 +/* if scalefac is NULL, we are doing the NHC of the barostat */
 +    
 +    bBarostat = FALSE;
 +    if (scalefac == NULL) {
 +        bBarostat = TRUE;
 +    }
 +
 +    for (i=0; i<nvar; i++) 
 +    {
 +
 +        /* make it easier to iterate by selecting 
 +           out the sub-array that corresponds to this T group */
 +        
 +        ivxi = &vxi[i*nh];
 +        ixi = &xi[i*nh];
 +        if (bBarostat) {
 +            iQinv = &(MassQ->QPinv[i*nh]); 
 +            nd = 1.0; /* THIS WILL CHANGE IF NOT ISOTROPIC */
 +            reft = max(0.0,opts->ref_t[0]);
 +            Ekin = sqr(*veta)/MassQ->Winv;
 +        } else {
 +            iQinv = &(MassQ->Qinv[i*nh]);  
 +            tcstat = &ekind->tcstat[i];
 +            nd = opts->nrdf[i];
 +            reft = max(0.0,opts->ref_t[i]);
 +            if (bEkinAveVel) 
 +            {
 +                Ekin = 2*trace(tcstat->ekinf)*tcstat->ekinscalef_nhc;
 +            } else {
 +                Ekin = 2*trace(tcstat->ekinh)*tcstat->ekinscaleh_nhc;
 +            }
 +        }
 +        kT = BOLTZ*reft;
 +
 +        for(mi=0;mi<mstepsi;mi++) 
 +        {
 +            for(mj=0;mj<mstepsj;mj++)
 +            { 
 +                /* weighting for this step using Suzuki-Yoshida integration - fixed at 5 */
 +                dt = sy_const[ns][mj] * dtfull / mstepsi;
 +                
 +                /* compute the thermal forces */
 +                GQ[0] = iQinv[0]*(Ekin - nd*kT);
 +                
 +                for (j=0;j<nh-1;j++) 
 +                {     
 +                    if (iQinv[j+1] > 0) {
 +                        /* we actually don't need to update here if we save the 
 +                           state of the GQ, but it's easier to just recompute*/
 +                        GQ[j+1] = iQinv[j+1]*((sqr(ivxi[j])/iQinv[j])-kT);      
 +                    } else {
 +                        GQ[j+1] = 0;
 +                    }
 +                }
 +                
 +                ivxi[nh-1] += 0.25*dt*GQ[nh-1];
 +                for (j=nh-1;j>0;j--) 
 +                { 
 +                    Efac = exp(-0.125*dt*ivxi[j]);
 +                    ivxi[j-1] = Efac*(ivxi[j-1]*Efac + 0.25*dt*GQ[j-1]);
 +                }
 +                
 +                Efac = exp(-0.5*dt*ivxi[0]);
 +                if (bBarostat) {
 +                    *veta *= Efac;                
 +                } else {
 +                    scalefac[i] *= Efac;
 +                }
 +                Ekin *= (Efac*Efac);
 +                
 +                /* Issue - if the KE is an average of the last and the current temperatures, then we might not be
 +                   able to scale the kinetic energy directly with this factor.  Might take more bookkeeping -- have to
 +                   think about this a bit more . . . */
 +
 +                GQ[0] = iQinv[0]*(Ekin - nd*kT);
 +                
 +                /* update thermostat positions */
 +                for (j=0;j<nh;j++) 
 +                { 
 +                    ixi[j] += 0.5*dt*ivxi[j];
 +                }
 +                
 +                for (j=0;j<nh-1;j++) 
 +                { 
 +                    Efac = exp(-0.125*dt*ivxi[j+1]);
 +                    ivxi[j] = Efac*(ivxi[j]*Efac + 0.25*dt*GQ[j]);
 +                    if (iQinv[j+1] > 0) {
 +                        GQ[j+1] = iQinv[j+1]*((sqr(ivxi[j])/iQinv[j])-kT);  
 +                    } else {
 +                        GQ[j+1] = 0;
 +                    }
 +                }
 +                ivxi[nh-1] += 0.25*dt*GQ[nh-1];
 +            }
 +        }
 +    }
 +    sfree(GQ);
 +}
 +
 +static void boxv_trotter(t_inputrec *ir, real *veta, real dt, tensor box, 
-     pscal   = calc_pres(ir->ePBC,nwall,box,ekinmod,vir,localpres);
++                         gmx_ekindata_t *ekind, tensor vir, real pcorr, t_extmass *MassQ)
 +{
 +
 +    real  pscal;
 +    double alpha;
 +    int   i,j,d,n,nwall;
 +    real  T,GW,vol;
 +    tensor Winvm,ekinmod,localpres;
 +    
 +    /* The heat bath is coupled to a separate barostat, the last temperature group.  In the 
 +       2006 Tuckerman et al paper., the order is iL_{T_baro} iL {T_part}
 +    */
 +    
 +    if (ir->epct==epctSEMIISOTROPIC) 
 +    {
 +        nwall = 2;
 +    } 
 +    else 
 +    {
 +        nwall = 3;
 +    }
 +
 +    /* eta is in pure units.  veta is in units of ps^-1. GW is in 
 +       units of ps^-2.  However, eta has a reference of 1 nm^3, so care must be 
 +       taken to use only RATIOS of eta in updating the volume. */
 +    
 +    /* we take the partial pressure tensors, modify the 
 +       kinetic energy tensor, and recovert to pressure */
 +    
 +    if (ir->opts.nrdf[0]==0) 
 +    { 
 +        gmx_fatal(FARGS,"Barostat is coupled to a T-group with no degrees of freedom\n");    
 +    } 
 +    /* alpha factor for phase space volume, then multiply by the ekin scaling factor.  */
 +    alpha = 1.0 + DIM/((double)ir->opts.nrdf[0]);
 +    alpha *= ekind->tcstat[0].ekinscalef_nhc;
 +    msmul(ekind->ekin,alpha,ekinmod);  
 +    /* for now, we use Elr = 0, because if you want to get it right, you
 +       really should be using PME. Maybe print a warning? */
 +
-                          enerd->term[F_PDISPCORR],enerd->term[F_DISPCORR],MassQ);
++    pscal   = calc_pres(ir->ePBC,nwall,box,ekinmod,vir,localpres)+pcorr;
 +
 +    vol = det(box);
 +    GW = (vol*(MassQ->Winv/PRESFAC))*(DIM*pscal - trace(ir->ref_p));   /* W is in ps^2 * bar * nm^3 */
 +
 +    *veta += 0.5*dt*GW;   
 +}
 +
 +/* 
 + * This file implements temperature and pressure coupling algorithms:
 + * For now only the Weak coupling and the modified weak coupling.
 + *
 + * Furthermore computation of pressure and temperature is done here
 + *
 + */
 +
 +real calc_pres(int ePBC,int nwall,matrix box,tensor ekin,tensor vir,
 +               tensor pres)
 +{
 +    int  n,m;
 +    real fac;
 +    
 +    if (ePBC==epbcNONE || (ePBC==epbcXY && nwall!=2))
 +        clear_mat(pres);
 +    else {
 +        /* Uitzoeken welke ekin hier van toepassing is, zie Evans & Morris - E. 
 +         * Wrs. moet de druktensor gecorrigeerd worden voor de netto stroom in  
 +         * het systeem...       
 +         */
 +        
 +        fac=PRESFAC*2.0/det(box);
 +        for(n=0; (n<DIM); n++)
 +            for(m=0; (m<DIM); m++)
 +                pres[n][m] = (ekin[n][m] - vir[n][m])*fac;
 +        
 +        if (debug) {
 +            pr_rvecs(debug,0,"PC: pres",pres,DIM);
 +            pr_rvecs(debug,0,"PC: ekin",ekin,DIM);
 +            pr_rvecs(debug,0,"PC: vir ",vir, DIM);
 +            pr_rvecs(debug,0,"PC: box ",box, DIM);
 +        }
 +    }
 +    return trace(pres)/DIM;
 +}
 +
 +real calc_temp(real ekin,real nrdf)
 +{
 +    if (nrdf > 0)
 +        return (2.0*ekin)/(nrdf*BOLTZ);
 +    else
 +        return 0;
 +}
 +
/* Parrinello-Rahman pressure coupling: integrate the box-vector
 * velocities (boxv) over one step dt from the deviation of the current
 * pressure tensor pres from ir->ref_p, and construct the coupling
 * tensor M (used by update() to couple particle motion to the box) and
 * the scaling matrix mu.  No coordinates or box vectors are changed
 * here; boxv, M and mu are the outputs.  On the first step (bFirstStep)
 * only M and mu are computed, since boxv has no history yet. */
void parrinellorahman_pcoupl(FILE *fplog,gmx_large_int_t step,
                           t_inputrec *ir,real dt,tensor pres,
                           tensor box,tensor box_rel,tensor boxv,
                           tensor M,matrix mu,gmx_bool bFirstStep)
{
  /* This doesn't do any coordinate updating. It just
   * integrates the box vector equations from the calculated
   * acceleration due to pressure difference. We also compute
   * the tensor M which is used in update to couple the particle
   * coordinates to the box vectors.
   *
   * In Nose and Klein (Mol.Phys 50 (1983) no 5., p 1055) this is
   * given as
   *            -1    .           .     -1
   * M_nk = (h')   * (h' * h + h' h) * h
   *
   * with the dots denoting time derivatives and h is the transformation from
   * the scaled frame to the real frame, i.e. the TRANSPOSE of the box. 
   * This also goes for the pressure and M tensors - they are transposed relative
   * to ours. Our equation thus becomes:
   *
   *                  -1       .    .           -1
   * M_gmx = M_nk' = b  * (b * b' + b * b') * b'
   * 
   * where b is the gromacs box matrix.                       
   * Our box accelerations are given by
   *   ..                                    ..
   *   b = vol/W inv(box') * (P-ref_P)     (=h')
   */
  
  int    d,n;
  tensor winv;                 /* inverse box-"mass" tensor (1/W per element) */
  real   vol=box[XX][XX]*box[YY][YY]*box[ZZ][ZZ];
  real   atot,arel,change,maxchange,xy_pressure;
  tensor invbox,pdiff,t1,t2;

  real maxl;

  m_inv_ur0(box,invbox);

  if (!bFirstStep) {
    /* Note that PRESFAC does not occur here.
     * The pressure and compressibility always occur as a product,
     * therefore the pressure unit drops out.
     */
    /* The box mass is scaled with the largest box length, so the
     * coupling strength is roughly independent of system size. */
    maxl=max(box[XX][XX],box[YY][YY]);
    maxl=max(maxl,box[ZZ][ZZ]);
    for(d=0;d<DIM;d++)
      for(n=0;n<DIM;n++)
      winv[d][n]=
        (4*M_PI*M_PI*ir->compress[d][n])/(3*ir->tau_p*ir->tau_p*maxl);
    
    /* pdiff = P - P_ref: the driving force for the box acceleration */
    m_sub(pres,ir->ref_p,pdiff);
    
    if(ir->epct==epctSURFACETENSION) {
      /* Unlike Berendsen coupling it might not be trivial to include a z
       * pressure correction here? On the other hand we don't scale the
       * box momentarily, but change accelerations, so it might not be crucial.
       */
      /* NOTE(review): here ref_p[d][d] holds gamma*nsurf (surface tension
       * times number of surfaces), hence the division by the box height
       * box[d][d] to convert to a pressure — confirm against the manual. */
      xy_pressure=0.5*(pres[XX][XX]+pres[YY][YY]);
      for(d=0;d<ZZ;d++)
      pdiff[d][d]=(xy_pressure-(pres[ZZ][ZZ]-ir->ref_p[d][d]/box[d][d]));
    }
    
    tmmul(invbox,pdiff,t1);
    /* Move the off-diagonal elements of the 'force' to one side to ensure
     * that we obey the box constraints.
     */
    for(d=0;d<DIM;d++) {
      for(n=0;n<d;n++) {
      t1[d][n] += t1[n][d];
      t1[n][d] = 0;
      }
    }
    
    switch (ir->epct) {
    case epctANISOTROPIC:
      /* Each lower-triangular element gets its own acceleration */
      for(d=0;d<DIM;d++) 
      for(n=0;n<=d;n++)
        t1[d][n] *= winv[d][n]*vol;
      break;
    case epctISOTROPIC:
      /* calculate total volume acceleration */
      atot=box[XX][XX]*box[YY][YY]*t1[ZZ][ZZ]+
      box[XX][XX]*t1[YY][YY]*box[ZZ][ZZ]+
      t1[XX][XX]*box[YY][YY]*box[ZZ][ZZ];
      arel=atot/(3*vol);
      /* set all RELATIVE box accelerations equal, and maintain total V
       * change speed */
      for(d=0;d<DIM;d++)
      for(n=0;n<=d;n++)
        t1[d][n] = winv[0][0]*vol*arel*box[d][n];    
      break;
    case epctSEMIISOTROPIC:
    case epctSURFACETENSION:
      /* Note the correction to pdiff above for surftens. coupling  */
      
      /* calculate total XY volume acceleration */
      atot=box[XX][XX]*t1[YY][YY]+t1[XX][XX]*box[YY][YY];
      arel=atot/(2*box[XX][XX]*box[YY][YY]);
      /* set RELATIVE XY box accelerations equal, and maintain total V
       * change speed. Dont change the third box vector accelerations */
      for(d=0;d<ZZ;d++)
      for(n=0;n<=d;n++)
        t1[d][n] = winv[d][n]*vol*arel*box[d][n];
      /* NOTE(review): at this point d == ZZ (left over from the loop
       * above), so winv[d][n] is winv[ZZ][n]; writing ZZ explicitly
       * would be clearer and less fragile. */
      for(n=0;n<DIM;n++)
      t1[ZZ][n] *= winv[d][n]*vol;
      break;
    default:
      gmx_fatal(FARGS,"Parrinello-Rahman pressure coupling type %s "
                "not supported yet\n",EPCOUPLTYPETYPE(ir->epct));
      break;
    }
    
    maxchange=0;
    for(d=0;d<DIM;d++)
      for(n=0;n<=d;n++) {
      boxv[d][n] += dt*t1[d][n];
      
      /* We do NOT update the box vectors themselves here, since
       * we need them for shifting later. It is instead done last
       * in the update() routine.
       */
      
      /* Calculate the change relative to diagonal elements-
         since it's perfectly ok for the off-diagonal ones to
         be zero it doesn't make sense to check the change relative
         to its current size.
      */
      
      change=fabs(dt*boxv[d][n]/box[d][d]);
      
      if (change>maxchange)
        maxchange=change;
      }
    
    if (maxchange > 0.01 && fplog) {
      char buf[22];
      fprintf(fplog,
              "\nStep %s  Warning: Pressure scaling more than 1%%. "
              "This may mean your system\n is not yet equilibrated. "
              "Use of Parrinello-Rahman pressure coupling during\n"
              "equilibration can lead to simulation instability, "
              "and is discouraged.\n",
            gmx_step_str(step,buf));
    }
  }
  
  preserve_box_shape(ir,box_rel,boxv);

  /* M = invbox * (boxv * box') * invbox', see the derivation above */
  mtmul(boxv,box,t1);       /* t1=boxv * b' */
  mmul(invbox,t1,t2);
  mtmul(t2,invbox,M);

  /* Determine the scaling matrix mu for the coordinates */
  for(d=0;d<DIM;d++)
    for(n=0;n<=d;n++)
      t1[d][n] = box[d][n] + dt*boxv[d][n];
  preserve_box_shape(ir,box_rel,t1);
  /* t1 is the box at t+dt, determine mu as the relative change */
  mmul_ur0(invbox,t1,mu);
}
 +
 +void berendsen_pcoupl(FILE *fplog,gmx_large_int_t step, 
 +                    t_inputrec *ir,real dt, tensor pres,matrix box,
 +                    matrix mu)
 +{
 +  int    d,n;
 +  real   scalar_pressure, xy_pressure, p_corr_z;
 +  char   *ptr,buf[STRLEN];
 +
 +  /*
 +   *  Calculate the scaling matrix mu
 +   */
 +  scalar_pressure=0;
 +  xy_pressure=0;
 +  for(d=0; d<DIM; d++) {
 +    scalar_pressure += pres[d][d]/DIM;
 +    if (d != ZZ)
 +      xy_pressure += pres[d][d]/(DIM-1);
 +  }
 +  /* Pressure is now in bar, everywhere. */
 +#define factor(d,m) (ir->compress[d][m]*dt/ir->tau_p)
 +  
 +  /* mu has been changed from pow(1+...,1/3) to 1+.../3, since this is
 +   * necessary for triclinic scaling
 +   */
 +  clear_mat(mu);
 +  switch (ir->epct) {
 +  case epctISOTROPIC:
 +    for(d=0; d<DIM; d++) 
 +      {
 +      mu[d][d] = 1.0 - factor(d,d)*(ir->ref_p[d][d] - scalar_pressure) /DIM;
 +      }
 +    break;
 +  case epctSEMIISOTROPIC:
 +    for(d=0; d<ZZ; d++)
 +      mu[d][d] = 1.0 - factor(d,d)*(ir->ref_p[d][d]-xy_pressure)/DIM;
 +    mu[ZZ][ZZ] = 
 +      1.0 - factor(ZZ,ZZ)*(ir->ref_p[ZZ][ZZ] - pres[ZZ][ZZ])/DIM;
 +    break;
 +  case epctANISOTROPIC:
 +    for(d=0; d<DIM; d++)
 +      for(n=0; n<DIM; n++)
 +      mu[d][n] = (d==n ? 1.0 : 0.0) 
 +        -factor(d,n)*(ir->ref_p[d][n] - pres[d][n])/DIM;
 +    break;
 +  case epctSURFACETENSION:
 +    /* ir->ref_p[0/1] is the reference surface-tension times *
 +     * the number of surfaces                                */
 +    if (ir->compress[ZZ][ZZ])
 +      p_corr_z = dt/ir->tau_p*(ir->ref_p[ZZ][ZZ] - pres[ZZ][ZZ]);
 +    else
 +      /* when the compressibity is zero, set the pressure correction   *
 +       * in the z-direction to zero to get the correct surface tension */
 +      p_corr_z = 0;
 +    mu[ZZ][ZZ] = 1.0 - ir->compress[ZZ][ZZ]*p_corr_z;
 +    for(d=0; d<DIM-1; d++)
 +      mu[d][d] = 1.0 + factor(d,d)*(ir->ref_p[d][d]/(mu[ZZ][ZZ]*box[ZZ][ZZ])
 +                                  - (pres[ZZ][ZZ]+p_corr_z - xy_pressure))/(DIM-1);
 +    break;
 +  default:
 +    gmx_fatal(FARGS,"Berendsen pressure coupling type %s not supported yet\n",
 +              EPCOUPLTYPETYPE(ir->epct));
 +    break;
 +  }
 +  /* To fullfill the orientation restrictions on triclinic boxes
 +   * we will set mu_yx, mu_zx and mu_zy to 0 and correct
 +   * the other elements of mu to first order.
 +   */
 +  mu[YY][XX] += mu[XX][YY];
 +  mu[ZZ][XX] += mu[XX][ZZ];
 +  mu[ZZ][YY] += mu[YY][ZZ];
 +  mu[XX][YY] = 0;
 +  mu[XX][ZZ] = 0;
 +  mu[YY][ZZ] = 0;
 +
 +  if (debug) {
 +    pr_rvecs(debug,0,"PC: pres ",pres,3);
 +    pr_rvecs(debug,0,"PC: mu   ",mu,3);
 +  }
 +  
 +  if (mu[XX][XX]<0.99 || mu[XX][XX]>1.01 ||
 +      mu[YY][YY]<0.99 || mu[YY][YY]>1.01 ||
 +      mu[ZZ][ZZ]<0.99 || mu[ZZ][ZZ]>1.01) {
 +    char buf2[22];
 +    sprintf(buf,"\nStep %s  Warning: pressure scaling more than 1%%, "
 +          "mu: %g %g %g\n",
 +          gmx_step_str(step,buf2),mu[XX][XX],mu[YY][YY],mu[ZZ][ZZ]);
 +    if (fplog)
 +      fprintf(fplog,"%s",buf);
 +    fprintf(stderr,"%s",buf);
 +  }
 +}
 +
/* Apply the Berendsen scaling matrix mu (from berendsen_pcoupl) to the
 * positions of atoms [start, start+nr_atoms) and to the box vectors,
 * then re-impose the box-shape restrictions.  Components of atoms in
 * freeze groups are left untouched.  Statement order is significant:
 * the XX update reads the not-yet-scaled YY and ZZ components, and
 * likewise YY reads the old ZZ, so XX must be scaled first. */
void berendsen_pscale(t_inputrec *ir,matrix mu,
                    matrix box,matrix box_rel,
                    int start,int nr_atoms,
                    rvec x[],unsigned short cFREEZE[],
                    t_nrnb *nrnb)
{
  ivec   *nFreeze=ir->opts.nFreeze;
  int    n,d,g=0;   /* g stays 0 when there are no freeze groups */
      
  /* Scale the positions */
  for (n=start; n<start+nr_atoms; n++) {
    if (cFREEZE)
      g = cFREEZE[n];
    
    if (!nFreeze[g][XX])
      x[n][XX] = mu[XX][XX]*x[n][XX]+mu[YY][XX]*x[n][YY]+mu[ZZ][XX]*x[n][ZZ];
    if (!nFreeze[g][YY])
      x[n][YY] = mu[YY][YY]*x[n][YY]+mu[ZZ][YY]*x[n][ZZ];
    if (!nFreeze[g][ZZ])
      x[n][ZZ] = mu[ZZ][ZZ]*x[n][ZZ];
  }
  /* compute final boxlengths */
  for (d=0; d<DIM; d++) {
    box[d][XX] = mu[XX][XX]*box[d][XX]+mu[YY][XX]*box[d][YY]+mu[ZZ][XX]*box[d][ZZ];
    box[d][YY] = mu[YY][YY]*box[d][YY]+mu[ZZ][YY]*box[d][ZZ];
    box[d][ZZ] = mu[ZZ][ZZ]*box[d][ZZ];
  }      

  preserve_box_shape(ir,box_rel,box);
  
  /* (un)shifting should NOT be done after this,
   * since the box vectors might have changed
   */
  inc_nrnb(nrnb,eNR_PCOUPL,nr_atoms);
}
 +
 +void berendsen_tcoupl(t_inputrec *ir,gmx_ekindata_t *ekind,real dt)
 +{
 +    t_grpopts *opts;
 +    int    i;
 +    real   T,reft=0,lll;
 +
 +    opts = &ir->opts;
 +
 +    for(i=0; (i<opts->ngtc); i++)
 +    {
 +        if (ir->eI == eiVV)
 +        {
 +            T = ekind->tcstat[i].T;
 +        }
 +        else
 +        {
 +            T = ekind->tcstat[i].Th;
 +        }
 +
 +        if ((opts->tau_t[i] > 0) && (T > 0.0)) {  
 +            reft = max(0.0,opts->ref_t[i]);
 +            lll  = sqrt(1.0 + (dt/opts->tau_t[i])*(reft/T-1.0));
 +            ekind->tcstat[i].lambda = max(min(lll,1.25),0.8);
 +        }
 +        else {
 +            ekind->tcstat[i].lambda = 1.0;
 +        }
 +
 +        if (debug)
 +        {
 +            fprintf(debug,"TC: group %d: T: %g, Lambda: %g\n",
 +                    i,T,ekind->tcstat[i].lambda);
 +        }
 +    }
 +}
 +
 +static int poisson_variate(real lambda,gmx_rng_t rng) {
 +
 +    real L;
 +    int k=0;
 +    real p=1.0;
 +
 +    L = exp(-lambda);
 +
 +    do
 +    {
 +        k = k+1;
 +        p *= gmx_rng_uniform_real(rng);
 +    } while (p>L);
 +
 +    return k-1;
 +}
 +
/* Andersen thermostat: redraw the velocities of selected atoms from the
 * Maxwell-Boltzmann distribution at the group reference temperature.
 * Two modes: when idef is non-NULL, each atom is independently selected
 * with probability 'rate' (Andersen), with constraint groups (SETTLE
 * and SHAKE blocks) randomized all-or-nothing to keep constraints
 * consistent; when idef is NULL, all home atoms are randomized
 * (Andersen-massive).  randatom/randatom_list are caller-provided
 * scratch arrays; randomize[g] and boltzfac[g] select and scale the
 * per-temperature-group randomization (boltzfac presumably kB*T_ref
 * per group — set up by the caller). */
void andersen_tcoupl(t_inputrec *ir,t_mdatoms *md,t_state *state, gmx_rng_t rng, real rate, t_idef *idef, int nblocks, int *sblock,gmx_bool *randatom, int *randatom_list, gmx_bool *randomize, real *boltzfac)
{
    t_grpopts *opts;
    int    i,j,k,d,len,n,ngtc,gc=0;
    int    nshake, nsettle, nrandom, nrand_group;
    real   boltz,scal,reft,prand;
    t_iatom *iatoms;

    /* convenience variables */
    opts = &ir->opts;
    ngtc = opts->ngtc;

    /* idef is only passed in if it's chance-per-particle andersen, so
       it essentially serves as a boolean to determine which type of
       andersen is being used */
    if (idef) {

        /* randomly atoms to randomize.  However, all constraint
           groups have to have either all of the atoms or none of the
           atoms randomize.

           Algorithm:
           1. Select whether or not to randomize each atom to get the correct probability.
           2. Cycle through the constraint groups.
              2a. for each constraint group, determine the fraction f of that constraint group that are
                  chosen to be randomized.
              2b. all atoms in the constraint group are randomized with probability f.
        */

        nrandom = 0;
        if ((rate < 0.05) && (md->homenr > 50))
        {
            /* if the rate is relatively high, use a standard method, if low rate,
             * use poisson */
            /* poisson distributions approxmation, more efficient for
             * low rates, fewer random numbers required */
            nrandom = poisson_variate(md->homenr*rate,rng);  /* how many do we randomize? Use Poisson. */
            /* now we know how many, choose them randomly. No worries about repeats, at this rate, it's negligible.
               worst thing that happens, it lowers the true rate an negligible amount */
            /* NOTE(review): assumes gmx_rng_uniform_real returns values in
             * [0,1); a return of exactly 1.0 would index one past the end. */
            for (i=0;i<nrandom;i++)
            {
                randatom[(int)(gmx_rng_uniform_real(rng)*md->homenr)] = TRUE;
            }
        }
        else
        {
            /* Per-atom Bernoulli trial with probability 'rate' */
            for (i=0;i<md->homenr;i++)
            {
                if (gmx_rng_uniform_real(rng)<rate)
                {
                    randatom[i] = TRUE;
                    nrandom++;
                }
            }
        }

        /* instead of looping over the constraint groups, if we had a
           list of which atoms were in which constraint groups, we
           could then loop over only the groups that are randomized
           now.  But that is not available now.  Create later after
           determining whether there actually is any slowing. */

        /* first, loop through the settles to make sure all groups either entirely randomized, or not randomized. */

        /* Each SETTLE entry is 2 t_iatoms (type + first atom); the three
         * water atoms are assumed contiguous starting at iatoms[2*i+1]. */
        nsettle  = idef->il[F_SETTLE].nr/2;
        for (i=0;i<nsettle;i++)
        {
            iatoms = idef->il[F_SETTLE].iatoms;
            nrand_group = 0;
            for (k=0;k<3;k++)  /* settles are always 3 atoms, hardcoded */
            {
                if (randatom[iatoms[2*i+1]+k])
                {
                    nrand_group++;     /* count the number of atoms to be shaken in the settles group */
                    randatom[iatoms[2*i+1]+k] = FALSE;
                    nrandom--;
                }
            }
            if (nrand_group > 0)
            {
                prand = (nrand_group)/3.0;  /* use this fraction to compute the probability the
                                               whole group is randomized */
                if (gmx_rng_uniform_real(rng)<prand)
                {
                    for (k=0;k<3;k++)
                    {
                        randatom[iatoms[2*i+1]+k] = TRUE;   /* mark them all to be randomized */
                    }
                    nrandom+=3;
                }
            }
        }

        /* now loop through the shake groups */
        nshake = nblocks;
        for (i=0;i<nshake;i++)
        {
            iatoms = &(idef->il[F_CONSTR].iatoms[sblock[i]]);
            len = sblock[i+1]-sblock[i];
            nrand_group = 0;
            for (k=0;k<len;k++)
            {
                if (k%3 != 0)
                {  /* only 2/3 of the sblock items are atoms, the others are labels */
                    if (randatom[iatoms[k]])
                    {
                        nrand_group++;
                        randatom[iatoms[k]] = FALSE;  /* need to mark it false here in case the atom is in more than
                                                         one group in the shake block */
                        nrandom--;
                    }
                }
            }
            if (nrand_group > 0)
            {
                /* fraction of the (2*len/3) atoms in this block selected */
                prand = (nrand_group)/(1.0*(2*len/3));
                if (gmx_rng_uniform_real(rng)<prand)
                {
                    for (k=0;k<len;k++)
                    {
                        if (k%3 != 0)
                        {  /* only 2/3 of the sblock items are atoms, the others are labels */
                            randatom[iatoms[k]] = TRUE; /* randomize all of them */
                            nrandom++;
                        }
                    }
                }
            }
        }
        if (nrandom > 0)
        {
            /* Compact the randatom flags into an index list */
            n = 0;
            for (i=0;i<md->homenr;i++)  /* now loop over the list of atoms */
            {
                if (randatom[i])
                {
                    randatom_list[n] = i;
                    n++;
                }
            }
            nrandom = n;  /* there are some values of nrandom for which
                             this algorithm won't work; for example all
                             water molecules and nrandom =/= 3.  Better to
                             recount and use this number (which we
                             calculate anyway: it will not affect
                             the average number of atoms accepted.
                          */
        }
    }
    else
    {
        /* if it's andersen-massive, then randomize all the atoms */
        nrandom = md->homenr;
        for (i=0;i<nrandom;i++)
        {
            randatom_list[i] = i;
        }
    }

    /* randomize the velocities of the selected particles */

    for (i=0;i<nrandom;i++)  /* now loop over the list of atoms */
    {
        n = randatom_list[i];
        if (md->cTC)
        {
            gc   = md->cTC[n];  /* assign the atom to a temperature group if there are more than one */
        }
        if (randomize[gc])
        {
            /* Gaussian velocities with variance boltzfac[gc]/m */
            scal = sqrt(boltzfac[gc]*md->invmass[n]);
            for (d=0;d<DIM;d++)
            {
                state->v[n][d] = scal*gmx_rng_gaussian_table(rng);
            }
        }
        randatom[n] = FALSE; /* unmark this atom for randomization */
    }
}
 +
 +
 +void nosehoover_tcoupl(t_grpopts *opts,gmx_ekindata_t *ekind,real dt,
 +                       double xi[],double vxi[], t_extmass *MassQ)
 +{
 +    int   i;
 +    real  reft,oldvxi;
 +    
 +    /* note that this routine does not include Nose-hoover chains yet. Should be easy to add. */
 +    
 +    for(i=0; (i<opts->ngtc); i++)
 +    {
 +        reft = max(0.0,opts->ref_t[i]);
 +        oldvxi = vxi[i];
 +        vxi[i]  += dt*MassQ->Qinv[i]*(ekind->tcstat[i].Th - reft);
 +        xi[i] += dt*(oldvxi + vxi[i])*0.5;
 +    }
 +}
 +
 +t_state *init_bufstate(const t_state *template_state)
 +{
 +    t_state *state;
 +    int nc = template_state->nhchainlength;
 +    snew(state,1);
 +    snew(state->nosehoover_xi,nc*template_state->ngtc);
 +    snew(state->nosehoover_vxi,nc*template_state->ngtc);
 +    snew(state->therm_integral,template_state->ngtc);
 +    snew(state->nhpres_xi,nc*template_state->nnhpres);
 +    snew(state->nhpres_vxi,nc*template_state->nnhpres);
 +
 +    return state;
 +}  
 +
 +void destroy_bufstate(t_state *state) 
 +{
 +    sfree(state->x);
 +    sfree(state->v);
 +    sfree(state->nosehoover_xi);
 +    sfree(state->nosehoover_vxi);
 +    sfree(state->therm_integral);
 +    sfree(state->nhpres_xi);
 +    sfree(state->nhpres_vxi);
 +    sfree(state);
 +}  
 +
 +void trotter_update(t_inputrec *ir,gmx_large_int_t step, gmx_ekindata_t *ekind, 
 +                    gmx_enerdata_t *enerd, t_state *state, 
 +                    tensor vir, t_mdatoms *md, 
 +                    t_extmass *MassQ, int **trotter_seqlist, int trotter_seqno) 
 +{
 +    
 +    int n,i,j,d,ntgrp,ngtc,gc=0;
 +    t_grp_tcstat *tcstat;
 +    t_grpopts *opts;
 +    gmx_large_int_t step_eff;
 +    real ecorr,pcorr,dvdlcorr;
 +    real bmass,qmass,reft,kT,dt,nd;
 +    tensor dumpres,dumvir;
 +    double *scalefac,dtc;
 +    int *trotter_seq;
 +    rvec sumv={0,0,0},consk;
 +    gmx_bool bCouple;
 +
 +    if (trotter_seqno <= ettTSEQ2)
 +    {
 +        step_eff = step-1;  /* the velocity verlet calls are actually out of order -- the first half step
 +                               is actually the last half step from the previous step.  Thus the first half step
 +                               actually corresponds to the n-1 step*/
 +                               
 +    } else {
 +        step_eff = step;
 +    }
 +
 +    bCouple = (ir->nsttcouple == 1 ||
 +               do_per_step(step_eff+ir->nsttcouple,ir->nsttcouple));
 +
 +    trotter_seq = trotter_seqlist[trotter_seqno];
 +
 +    /* signal we are returning if nothing is going to be done in this routine */
 +    if ((trotter_seq[0] == etrtSKIPALL)  || !(bCouple))
 +    {
 +        return;
 +    }
 +
 +    dtc = ir->nsttcouple*ir->delta_t;
 +    opts = &(ir->opts); /* just for ease of referencing */
 +    ngtc = opts->ngtc;
 +    assert(ngtc>0);
 +    snew(scalefac,opts->ngtc);
 +    for (i=0;i<ngtc;i++) 
 +    {
 +        scalefac[i] = 1;
 +    }
 +    /* execute the series of trotter updates specified in the trotterpart array */
 +    
 +    for (i=0;i<NTROTTERPARTS;i++){
 +        /* allow for doubled intgrators by doubling dt instead of making 2 calls */
 +        if ((trotter_seq[i] == etrtBAROV2) || (trotter_seq[i] == etrtBARONHC2) || (trotter_seq[i] == etrtNHC2))
 +        {
 +            dt = 2 * dtc;
 +        }
 +        else 
 +        {
 +            dt = dtc;
 +        }
 +
 +        switch (trotter_seq[i])
 +        {
 +        case etrtBAROV:
 +        case etrtBAROV2:
 +            boxv_trotter(ir,&(state->veta),dt,state->box,ekind,vir,
++                         enerd->term[F_PDISPCORR],MassQ);
 +            break;
 +        case etrtBARONHC:
 +        case etrtBARONHC2:
 +            NHC_trotter(opts,state->nnhpres,ekind,dt,state->nhpres_xi,
 +                        state->nhpres_vxi,NULL,&(state->veta),MassQ,FALSE);      
 +            break;
 +        case etrtNHC:
 +        case etrtNHC2:
 +            NHC_trotter(opts,opts->ngtc,ekind,dt,state->nosehoover_xi,
 +                        state->nosehoover_vxi,scalefac,NULL,MassQ,(ir->eI==eiVV));
 +            /* need to rescale the kinetic energies and velocities here.  Could 
 +               scale the velocities later, but we need them scaled in order to 
 +               produce the correct outputs, so we'll scale them here. */
 +            
 +            for (i=0; i<ngtc;i++) 
 +            {
 +                tcstat = &ekind->tcstat[i];
 +                tcstat->vscale_nhc = scalefac[i]; 
 +                tcstat->ekinscaleh_nhc *= (scalefac[i]*scalefac[i]); 
 +                tcstat->ekinscalef_nhc *= (scalefac[i]*scalefac[i]); 
 +            }
 +            /* now that we've scaled the groupwise velocities, we can add them up to get the total */
 +            /* but do we actually need the total? */
 +            
 +            /* modify the velocities as well */
 +            for (n=md->start;n<md->start+md->homenr;n++) 
 +            {
 +                if (md->cTC)   /* does this conditional need to be here? is this always true?*/
 +                { 
 +                    gc = md->cTC[n];
 +                }
 +                for (d=0;d<DIM;d++) 
 +                {
 +                    state->v[n][d] *= scalefac[gc];
 +                }
 +                
 +                if (debug) 
 +                {
 +                    for (d=0;d<DIM;d++) 
 +                    {
 +                        sumv[d] += (state->v[n][d])/md->invmass[n];
 +                    }
 +                }
 +            }          
 +            break;
 +        default:
 +            break;
 +        }
 +    }
 +    /* check for conserved momentum -- worth looking at this again eventually, but not working right now.*/  
 +#if 0
 +    if (debug) 
 +    {
 +        if (bFirstHalf) 
 +        {
 +            for (d=0;d<DIM;d++) 
 +            {
 +                consk[d] = sumv[d]*exp((1 + 1.0/opts->nrdf[0])*((1.0/DIM)*log(det(state->box)/state->vol0)) + state->nosehoover_xi[0]); 
 +            }
 +            fprintf(debug,"Conserved kappa: %15.8f %15.8f %15.8f\n",consk[0],consk[1],consk[2]);    
 +        }
 +    }
 +#endif
 +    sfree(scalefac);
 +}
 +
 +
/* Set the thermostat and (for MTTK) barostat inverse masses in MassQ
 * from the coupling time constants and reference temperatures in ir.
 * For plain MD (Nose-Hoover), Qinv[i] = 1/(tau^2/(2*pi)^2 * T_ref) per
 * group; for velocity-Verlet (MTTK) the barostat mass Winv and the
 * per-chain-link thermostat masses are set as well.  When bInit is
 * TRUE the arrays are allocated (and vol0 captured); on later calls
 * (e.g. after a lambda change) only the values are refreshed. */
extern void init_npt_masses(t_inputrec *ir, t_state *state, t_extmass *MassQ, gmx_bool bInit)
{
    int n,i,j,d,ntgrp,ngtc,nnhpres,nh,gc=0;
    t_grp_tcstat *tcstat;
    t_grpopts *opts;
    real ecorr,pcorr,dvdlcorr;
    real bmass,qmass,reft,kT,dt,ndj,nd;
    tensor dumpres,dumvir;

    opts = &(ir->opts); /* just for ease of referencing */
    ngtc = ir->opts.ngtc;
    nnhpres = state->nnhpres;
    nh = state->nhchainlength; 

    if (ir->eI == eiMD) {
        if (bInit)
        {
            snew(MassQ->Qinv,ngtc);
        }
        for(i=0; (i<ngtc); i++) 
        { 
            /* Groups without coupling (tau or T_ref <= 0) get zero
             * inverse mass, i.e. an infinitely heavy thermostat */
            if ((opts->tau_t[i] > 0) && (opts->ref_t[i] > 0)) 
            {
                MassQ->Qinv[i]=1.0/(sqr(opts->tau_t[i]/M_2PI)*opts->ref_t[i]);
            } 
            else 
            {
                MassQ->Qinv[i]=0.0;     
            }
        }
    }
    else if (EI_VV(ir->eI))
    {
    /* Set pressure variables */

        if (bInit)
        {
            if (state->vol0 == 0)
            {
                state->vol0 = det(state->box); 
                /* because we start by defining a fixed
                   compressibility, we need the volume at this
                   compressibility to solve the problem. */
            }
        }

        /* units are nm^3 * ns^2 / (nm^3 * bar / kJ/mol) = kJ/mol  */
        /* Consider evaluating eventually if this the right mass to use.  All are correct, some might be more stable  */
        MassQ->Winv = (PRESFAC*trace(ir->compress)*BOLTZ*opts->ref_t[0])/(DIM*state->vol0*sqr(ir->tau_p/M_2PI));
        /* An alternate mass definition, from Tuckerman et al. */ 
        /* MassQ->Winv = 1.0/(sqr(ir->tau_p/M_2PI)*(opts->nrdf[0]+DIM)*BOLTZ*opts->ref_t[0]); */
        for (d=0;d<DIM;d++) 
        {
            for (n=0;n<DIM;n++) 
            {
                MassQ->Winvm[d][n]= PRESFAC*ir->compress[d][n]/(state->vol0*sqr(ir->tau_p/M_2PI)); 
                /* not clear this is correct yet for the anisotropic case. Will need to reevaluate
                 before using MTTK for anisotropic states.*/
            }
        }
        /* Allocate space for thermostat variables */
        if (bInit)
        {
            snew(MassQ->Qinv,ngtc*nh);
        }

        /* now, set temperature variables */
        for (i=0; i<ngtc; i++)
        {
            if ((opts->tau_t[i] > 0) && (opts->ref_t[i] > 0))
            {
                reft = max(0.0,opts->ref_t[i]);
                nd = opts->nrdf[i];
                kT = BOLTZ*reft;
                for (j=0;j<nh;j++)
                {
                    /* The first chain link couples to all nd degrees of
                     * freedom; the remaining links each couple to one */
                    if (j==0)
                    {
                        ndj = nd;
                    }
                    else
                    {
                        ndj = 1;
                    }
                    MassQ->Qinv[i*nh+j]   = 1.0/(sqr(opts->tau_t[i]/M_2PI)*ndj*kT);
                }
            }
            else
            {
                reft=0.0;
                for (j=0;j<nh;j++)
                {
                    MassQ->Qinv[i*nh+j] = 0.0;
                }
            }
        }
    }
}
 +
/* Initialize the masses (via init_npt_masses) and build the table of
 * Trotter sub-step sequences used by trotter_update().  Returns a
 * freshly allocated [ettTSEQMAX][NTROTTERPARTS] array; when bTrotter is
 * FALSE every sequence is etrtSKIPALL, so trotter_update() is a no-op.
 * The sequences differ between eiVV and eiVVAK and between NPT, NVT and
 * NPH Trotter schemes, reflecting the operator-splitting order.
 * Also sets the barostat-thermostat masses MassQ->QPinv. */
int **init_npt_vars(t_inputrec *ir, t_state *state, t_extmass *MassQ, gmx_bool bTrotter)
{
    int n,i,j,d,ntgrp,ngtc,nnhpres,nh,gc=0;
    t_grp_tcstat *tcstat;
    t_grpopts *opts;
    real ecorr,pcorr,dvdlcorr;
    real bmass,qmass,reft,kT,dt,ndj,nd;
    tensor dumpres,dumvir;
    int **trotter_seq;

    opts = &(ir->opts); /* just for ease of referencing */
    ngtc = state->ngtc;
    nnhpres = state->nnhpres;
    nh = state->nhchainlength;

    init_npt_masses(ir, state, MassQ, TRUE);
    
    /* first, initialize clear all the trotter calls */
    snew(trotter_seq,ettTSEQMAX);
    for (i=0;i<ettTSEQMAX;i++) 
    {
        snew(trotter_seq[i],NTROTTERPARTS);
        for (j=0;j<NTROTTERPARTS;j++) {
            trotter_seq[i][j] = etrtNONE;
        }
        trotter_seq[i][0] = etrtSKIPALL;
    }
    
    if (!bTrotter) 
    {
        /* no trotter calls, so we never use the values in the array.
         * We access them (so we need to define them, but ignore
         * then.*/

        return trotter_seq;
    }

    /* compute the kinetic energy by using the half step velocities or
     * the kinetic energies, depending on the order of the trotter calls */

    if (ir->eI==eiVV)
    {
        if (IR_NPT_TROTTER(ir)) 
        {
            /* This is the complicated version - there are 4 possible calls, depending on ordering.
               We start with the initial one. */
            /* first, a round that estimates veta. */
            trotter_seq[0][0] = etrtBAROV;

            /* trotter_seq[1] is etrtNHC for 1/2 step velocities - leave zero */

            /* The first half trotter update */
            trotter_seq[2][0] = etrtBAROV;
            trotter_seq[2][1] = etrtNHC;
            trotter_seq[2][2] = etrtBARONHC;

            /* The second half trotter update */
            trotter_seq[3][0] = etrtBARONHC;
            trotter_seq[3][1] = etrtNHC;
            trotter_seq[3][2] = etrtBAROV;

            /* trotter_seq[4] is etrtNHC for second 1/2 step velocities - leave zero */
            
        } 
        else if (IR_NVT_TROTTER(ir))
        {
            /* This is the easy version - there are only two calls, both the same.
               Otherwise, even easier -- no calls  */
            trotter_seq[2][0] = etrtNHC;
            trotter_seq[3][0] = etrtNHC;
        }
        else if (IR_NPH_TROTTER(ir))
        {
            /* This is the complicated version - there are 4 possible calls, depending on ordering.
               We start with the initial one. */
            /* first, a round that estimates veta. */
            trotter_seq[0][0] = etrtBAROV;

            /* trotter_seq[1] is etrtNHC for 1/2 step velocities - leave zero */

            /* The first half trotter update */
            trotter_seq[2][0] = etrtBAROV;
            trotter_seq[2][1] = etrtBARONHC;

            /* The second half trotter update */
            trotter_seq[3][0] = etrtBARONHC;
            trotter_seq[3][1] = etrtBAROV;

            /* trotter_seq[4] is etrtNHC for second 1/2 step velocities - leave zero */
        }
    }
    else if (ir->eI==eiVVAK)
    {
        if (IR_NPT_TROTTER(ir))
        {
            /* This is the complicated version - there are 4 possible calls, depending on ordering.
               We start with the initial one. */
            /* first, a round that estimates veta. */
            trotter_seq[0][0] = etrtBAROV;

            /* The first half trotter update, part 1 -- double update, because it commutes */
            trotter_seq[1][0] = etrtNHC;

            /* The first half trotter update, part 2 */
            trotter_seq[2][0] = etrtBAROV;
            trotter_seq[2][1] = etrtBARONHC;
            
            /* The second half trotter update, part 1 */
            trotter_seq[3][0] = etrtBARONHC;
            trotter_seq[3][1] = etrtBAROV;

            /* The second half trotter update */
            trotter_seq[4][0] = etrtNHC;
        } 
        else if (IR_NVT_TROTTER(ir))
        {
            /* This is the easy version - there is only one call, both the same.
               Otherwise, even easier -- no calls  */
            trotter_seq[1][0] = etrtNHC;
            trotter_seq[4][0] = etrtNHC;
        }
        else if (IR_NPH_TROTTER(ir))
        {
            /* This is the complicated version - there are 4 possible calls, depending on ordering.
               We start with the initial one. */
            /* first, a round that estimates veta. */
            trotter_seq[0][0] = etrtBAROV; 

            /* The first half trotter update, part 1 -- leave zero */
            trotter_seq[1][0] = etrtNHC;

            /* The first half trotter update, part 2 */
            trotter_seq[2][0] = etrtBAROV;
            trotter_seq[2][1] = etrtBARONHC;

            /* The second half trotter update, part 1 */
            trotter_seq[3][0] = etrtBARONHC;
            trotter_seq[3][1] = etrtBAROV;

            /* The second half trotter update -- blank for now */
        }
    }

    /* only the isotropic barostat mass is defined so far; other coupling
     * types fall through to the same value */
    switch (ir->epct)
    {
    case epctISOTROPIC:  
    default:
        bmass = DIM*DIM;  /* recommended mass parameters for isotropic barostat */
    }    

    snew(MassQ->QPinv,nnhpres*opts->nhchainlength);

    /* barostat temperature */
    if ((ir->tau_p > 0) && (opts->ref_t[0] > 0))
    {
        reft = max(0.0,opts->ref_t[0]);
        kT = BOLTZ*reft;
        for (i=0;i<nnhpres;i++) {
            for (j=0;j<nh;j++) 
            {
                /* first chain link carries the barostat mass bmass,
                 * the rest unit mass */
                if (j==0) {
                    qmass = bmass;
                } 
                else 
                {
                    qmass = 1;
                }
                MassQ->QPinv[i*opts->nhchainlength+j]   = 1.0/(sqr(opts->tau_t[0]/M_2PI)*qmass*kT);
            }
        }
    }
    else 
    {
        /* NOTE(review): this branch indexes QPinv with nh while the
         * branch above uses opts->nhchainlength — presumably these are
         * equal (both are the NH chain length); confirm. */
        for (i=0;i<nnhpres;i++) {
            for (j=0;j<nh;j++) 
            {
                MassQ->QPinv[i*nh+j]=0.0;
            }
        }
    }    
    return trotter_seq;
}
 +
 +real NPT_energy(t_inputrec *ir, t_state *state, t_extmass *MassQ)
 +{
 +    int  i,j,nd,ndj,bmass,qmass,ngtcall;
 +    real ener_npt,reft,eta,kT,tau;
 +    double *ivxi, *ixi;
 +    double *iQinv;
 +    real vol,dbaro,W,Q;
 +    int nh = state->nhchainlength;
 +
 +    ener_npt = 0;
 +    
 +    /* now we compute the contribution of the pressure to the conserved quantity*/
 +    
 +    if (ir->epc==epcMTTK) 
 +    {
 +        /* find the volume, and the kinetic energy of the volume */
 +        
 +        switch (ir->epct) {
 +            
 +        case epctISOTROPIC:
 +            /* contribution from the pressure momenenta */
 +            ener_npt += 0.5*sqr(state->veta)/MassQ->Winv;
 +            
 +            /* contribution from the PV term */
 +            vol = det(state->box);
 +            ener_npt += vol*trace(ir->ref_p)/(DIM*PRESFAC);
 +
 +            break;
 +        case epctANISOTROPIC:
 +            
 +            break;
 +            
 +        case epctSURFACETENSION:
 +            
 +            break;
 +        case epctSEMIISOTROPIC:
 +            
 +            break;
 +        default:
 +            break;
 +        }
 +    }
 +    
 +    if (IR_NPT_TROTTER(ir) || IR_NPH_TROTTER(ir))
 +    {
 +        /* add the energy from the barostat thermostat chain */
 +        for (i=0;i<state->nnhpres;i++) {
 +
 +            /* note -- assumes only one degree of freedom that is thermostatted in barostat */
 +            ivxi = &state->nhpres_vxi[i*nh];
 +            ixi = &state->nhpres_xi[i*nh];
 +            iQinv = &(MassQ->QPinv[i*nh]);
 +            reft = max(ir->opts.ref_t[0],0); /* using 'System' temperature */
 +            kT = BOLTZ * reft;
 +        
 +            for (j=0;j<nh;j++) 
 +            {
 +                if (iQinv[j] > 0)
 +                {
 +                    ener_npt += 0.5*sqr(ivxi[j])/iQinv[j];
 +                    /* contribution from the thermal variable of the NH chain */
 +                    ener_npt += ixi[j]*kT;
 +                }
 +                if (debug) 
 +                {
 +                    fprintf(debug,"P-T-group: %10d Chain %4d ThermV: %15.8f ThermX: %15.8f",i,j,ivxi[j],ixi[j]);
 +                }
 +            }
 +        }
 +    }
 +        
 +    if (ir->etc) 
 +    {
 +        for(i=0; i<ir->opts.ngtc; i++) 
 +        {
 +            ixi = &state->nosehoover_xi[i*nh];
 +            ivxi = &state->nosehoover_vxi[i*nh];
 +            iQinv = &(MassQ->Qinv[i*nh]);
 +            
 +            nd = ir->opts.nrdf[i];
 +            reft = max(ir->opts.ref_t[i],0);
 +            kT = BOLTZ * reft;
 +            
 +            if (nd > 0) 
 +            {
 +                if (IR_NVT_TROTTER(ir))
 +                {
 +                    /* contribution from the thermal momenta of the NH chain */
 +                    for (j=0;j<nh;j++) 
 +                    {
 +                        if (iQinv[j] > 0) {
 +                            ener_npt += 0.5*sqr(ivxi[j])/iQinv[j];
 +                            /* contribution from the thermal variable of the NH chain */
 +                            if (j==0) {
 +                                ndj = nd;
 +                            } 
 +                            else 
 +                            {
 +                                ndj = 1;
 +                            } 
 +                            ener_npt += ndj*ixi[j]*kT;
 +                        }
 +                    }
 +                }
 +                else  /* Other non Trotter temperature NH control  -- no chains yet. */
 +                { 
 +                    ener_npt += 0.5*BOLTZ*nd*sqr(ivxi[0])/iQinv[0];
 +                    ener_npt += nd*ixi[0]*kT;
 +                }
 +            }
 +        }
 +    }
 +    return ener_npt;
 +}
 +
 +/* Returns a deviate distributed as Gamma(ia,1), i.e. with density
 + * x^(ia-1) exp(-x) / (ia-1)!, for integer order ia.
 + * Adapted from the gamdev() routine in Numerical Recipes: direct product
 + * of uniforms for small ia, rejection sampling with a Lorentzian
 + * comparison function for ia >= 6. The exact sequence of RNG draws is
 + * part of the reproducible-trajectory contract -- do not reorder.
 + * NOTE(review): assumes ia >= 1; for ia < 1 the product loop below is
 + * empty and x would always be -log(1) = 0 -- confirm callers. */
 +static real vrescale_gamdev(int ia, gmx_rng_t rng)
 +{
 +    int j;
 +    real am,e,s,v1,v2,x,y;
 +
 +    if (ia < 6)
 +    {
 +        /* Small order: -log of the product of ia uniform deviates is
 +         * Gamma(ia,1) distributed. Redraw on an (unlikely) exact zero
 +         * so that log() never sees 0. */
 +        do
 +        {
 +            x = 1.0;
 +            for(j=1; j<=ia; j++)
 +            {
 +                x *= gmx_rng_uniform_real(rng);
 +            }
 +        }
 +        while (x == 0);
 +        x = -log(x);
 +    }
 +    else
 +    {
 +        /* Large order: rejection method. (v1,v2) is a uniform point in
 +         * the unit half-disc, giving y = tan(pi*u) Lorentzian-distributed. */
 +        do
 +        {
 +            do
 +            {
 +                do
 +                {
 +                    v1 = gmx_rng_uniform_real(rng);
 +                    v2 = 2.0*gmx_rng_uniform_real(rng)-1.0;
 +                }
 +                while (v1*v1 + v2*v2 > 1.0 ||
 +                       v1*v1*GMX_REAL_MAX < 3.0*ia);
 +                /* The last check above ensures that both x (3.0 > 2.0 in s)
 +                 * and the pre-factor for e do not go out of range.
 +                 */
 +                y = v2/v1;
 +                am = ia - 1;
 +                s = sqrt(2.0*am + 1.0);
 +                x = s*y + am;
 +            }
 +            while (x <= 0.0);
 +            /* Acceptance ratio of target density to comparison function */
 +            e = (1.0 + y*y)*exp(am*log(x/am) - s*y);
 +        }
 +        while (gmx_rng_uniform_real(rng) > e);
 +    }
 +
 +    return x;
 +}
 +
 +static real vrescale_sumnoises(int nn,gmx_rng_t rng)
 +{
 +/*
 + * Returns the sum of n independent gaussian noises squared
 + * (i.e. equivalent to summing the square of the return values
 + * of nn calls to gmx_rng_gaussian_real).xs
 + */
 +  real rr;
 +
 +  if (nn == 0) {
 +    return 0.0;
 +  } else if (nn == 1) {
 +    rr = gmx_rng_gaussian_real(rng);
 +    return rr*rr;
 +  } else if (nn % 2 == 0) {
 +    return 2.0*vrescale_gamdev(nn/2,rng);
 +  } else {
 +    rr = gmx_rng_gaussian_real(rng);
 +    return 2.0*vrescale_gamdev((nn-1)/2,rng) + rr*rr;
 +  }
 +}
 +
 +static real vrescale_resamplekin(real kk,real sigma, int ndeg, real taut,
 +                               gmx_rng_t rng)
 +{
 +/*
 + * Generates a new value for the kinetic energy,
 + * according to Bussi et al JCP (2007), Eq. (A7)
 + * kk:    present value of the kinetic energy of the atoms to be thermalized (in arbitrary units)
 + * sigma: target average value of the kinetic energy (ndeg k_b T/2)  (in the same units as kk)
 + * ndeg:  number of degrees of freedom of the atoms to be thermalized
 + * taut:  relaxation time of the thermostat, in units of 'how often this routine is called'
 + */
 +  real factor,rr;
 +
 +  if (taut > 0.1) {
 +    factor = exp(-1.0/taut);
 +  } else {
 +    factor = 0.0;
 +  }
 +  rr = gmx_rng_gaussian_real(rng);
 +  return
 +    kk +
 +    (1.0 - factor)*(sigma*(vrescale_sumnoises(ndeg-1,rng) + rr*rr)/ndeg - kk) +
 +    2.0*rr*sqrt(kk*sigma/ndeg*(1.0 - factor)*factor);
 +}
 +
 +void vrescale_tcoupl(t_inputrec *ir,gmx_ekindata_t *ekind,real dt,
 +                     double therm_integral[],gmx_rng_t rng)
 +{
 +    t_grpopts *opts;
 +    int    i;
 +    real   Ek,Ek_ref1,Ek_ref,Ek_new; 
 +    
 +    opts = &ir->opts;
 +
 +    for(i=0; (i<opts->ngtc); i++)
 +    {
 +        if (ir->eI == eiVV)
 +        {
 +            Ek = trace(ekind->tcstat[i].ekinf);
 +        }
 +        else
 +        {
 +            Ek = trace(ekind->tcstat[i].ekinh);
 +        }
 +        
 +        if (opts->tau_t[i] > 0 && opts->nrdf[i] > 0 && Ek > 0)
 +        {
 +            Ek_ref1 = 0.5*opts->ref_t[i]*BOLTZ;
 +            Ek_ref  = Ek_ref1*opts->nrdf[i];
 +
 +            Ek_new  = vrescale_resamplekin(Ek,Ek_ref,opts->nrdf[i],
 +                                           opts->tau_t[i]/dt,rng);
 +
 +            /* Analytically Ek_new>=0, but we check for rounding errors */
 +            if (Ek_new <= 0)
 +            {
 +                ekind->tcstat[i].lambda = 0.0;
 +            }
 +            else
 +            {
 +                ekind->tcstat[i].lambda = sqrt(Ek_new/Ek);
 +            }
 +
 +            therm_integral[i] -= Ek_new - Ek;
 +
 +            if (debug)
 +            {
 +                fprintf(debug,"TC: group %d: Ekr %g, Ek %g, Ek_new %g, Lambda: %g\n",
 +                        i,Ek_ref,Ek,Ek_new,ekind->tcstat[i].lambda);
 +            }
 +        }
 +        else
 +        {
 +            ekind->tcstat[i].lambda = 1.0;
 +        }
 +    }
 +}
 +
 +real vrescale_energy(t_grpopts *opts,double therm_integral[])
 +{
 +  int i;
 +  real ener;
 +
 +  ener = 0;
 +  for(i=0; i<opts->ngtc; i++) {
 +    ener += therm_integral[i];
 +  }
 +  
 +  return ener;
 +}
 +
 +void rescale_velocities(gmx_ekindata_t *ekind,t_mdatoms *mdatoms,
 +                        int start,int end,rvec v[])
 +{
 +    t_grp_acc      *gstat;
 +    t_grp_tcstat   *tcstat;
 +    unsigned short *cACC,*cTC;
 +    int  ga,gt,n,d;
 +    real lg;
 +    rvec vrel;
 +
 +    tcstat = ekind->tcstat;
 +    cTC    = mdatoms->cTC;
 +
 +    if (ekind->bNEMD)
 +    {
 +        gstat  = ekind->grpstat;
 +        cACC   = mdatoms->cACC;
 +
 +        ga = 0;
 +        gt = 0;
 +        for(n=start; n<end; n++) 
 +        {
 +            if (cACC) 
 +            {
 +                ga   = cACC[n];
 +            }
 +            if (cTC)
 +            {
 +                gt   = cTC[n];
 +            }
 +            /* Only scale the velocity component relative to the COM velocity */
 +            rvec_sub(v[n],gstat[ga].u,vrel);
 +            lg = tcstat[gt].lambda;
 +            for(d=0; d<DIM; d++)
 +            {
 +                v[n][d] = gstat[ga].u[d] + lg*vrel[d];
 +            }
 +        }
 +    }
 +    else
 +    {
 +        gt = 0;
 +        for(n=start; n<end; n++) 
 +        {
 +            if (cTC)
 +            {
 +                gt   = cTC[n];
 +            }
 +            lg = tcstat[gt].lambda;
 +            for(d=0; d<DIM; d++)
 +            {
 +                v[n][d] *= lg;
 +            }
 +        }
 +    }
 +}
 +
 +
 +/* Sets opts->ref_t[i] for every T group that has simulated annealing
 + * enabled, by (periodically wrapped or absolute) time t and linear
 + * interpolation between the annealing schedule points. Groups with
 + * annealing == eannNO are left untouched. */
 +void update_annealing_target_temp(t_grpopts *opts,real t)
 +{
 +  int i,j,n,npoints;
 +  real pert,thist=0,x;
 +
 +  for(i=0;i<opts->ngtc;i++) {
 +    npoints = opts->anneal_npoints[i];
 +    switch (opts->annealing[i]) {
 +    case eannNO:
 +      continue;
 +    case  eannPERIODIC:
 +      /* calculate time modulo the period */
 +      pert  = opts->anneal_time[i][npoints-1];
 +      n     = t / pert;
 +      thist = t - n*pert; /* modulo time */
 +      /* Make sure rounding didn't get us outside the interval */
 +      if (fabs(thist-pert) < GMX_REAL_EPS*100)
 +      /* NOTE: the next statement is the (brace-less) body of the if above */
 +      thist=0;
 +      break;
 +    case eannSINGLE:
 +      thist = t;
 +      break;
 +    default:
 +      gmx_fatal(FARGS,"Death horror in update_annealing_target_temp (i=%d/%d npoints=%d)",i,opts->ngtc,npoints);
 +    }
 +    /* We are doing annealing for this group if we got here, 
 +     * and we have the (relative) time as thist.
 +     * calculate target temp */
 +    j=0;
 +    while ((j < npoints-1) && (thist>(opts->anneal_time[i][j+1])))
 +      j++;
 +    if (j < npoints-1) {
 +      /* Found our position between points j and j+1. 
 +       * Interpolate: x is the amount from j+1, (1-x) from point j 
 +       * First treat possible jumps in temperature as a special case.
 +       */
 +      if ((opts->anneal_time[i][j+1]-opts->anneal_time[i][j]) < GMX_REAL_EPS*100)
 +      opts->ref_t[i]=opts->anneal_temp[i][j+1];
 +      else {
 +      x = ((thist-opts->anneal_time[i][j])/
 +           (opts->anneal_time[i][j+1]-opts->anneal_time[i][j]));
 +      opts->ref_t[i] = x*opts->anneal_temp[i][j+1]+(1-x)*opts->anneal_temp[i][j];
 +      }
 +    }
 +    else {
 +      /* Past the last schedule point: hold the final temperature */
 +      opts->ref_t[i] = opts->anneal_temp[i][npoints-1];
 +    }
 +  }
 +}
index e6095ebf549d90cdc485eb92f2870dc9926e43ed,0000000000000000000000000000000000000000..e834fec52d3ab4136d37729a7eb85b5f8f240a52
mode 100644,000000..100644
--- /dev/null
@@@ -1,9589 -1,0 +1,9589 @@@
-                     "NOTE: %.1f %% performance was lost due to load imbalance\n"
 +/* -*- mode: c; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4; c-file-style: "stroustrup"; -*-
 + *
 + * 
 + * This file is part of Gromacs        Copyright (c) 1991-2008
 + * David van der Spoel, Erik Lindahl, Berk Hess, University of Groningen.
 + *
 + * This program is free software; you can redistribute it and/or
 + * modify it under the terms of the GNU General Public License
 + * as published by the Free Software Foundation; either version 2
 + * of the License, or (at your option) any later version.
 + *
 + * To help us fund GROMACS development, we humbly ask that you cite
 + * the research papers on the package. Check out http://www.gromacs.org
 + * 
 + * And Hey:
 + * Gnomes, ROck Monsters And Chili Sauce
 + */
 +
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <stdio.h>
 +#include <time.h>
 +#include <math.h>
 +#include <string.h>
 +#include <stdlib.h>
 +#include "typedefs.h"
 +#include "smalloc.h"
 +#include "gmx_fatal.h"
 +#include "gmx_fatal_collective.h"
 +#include "vec.h"
 +#include "domdec.h"
 +#include "domdec_network.h"
 +#include "nrnb.h"
 +#include "pbc.h"
 +#include "chargegroup.h"
 +#include "constr.h"
 +#include "mdatoms.h"
 +#include "names.h"
 +#include "pdbio.h"
 +#include "futil.h"
 +#include "force.h"
 +#include "pme.h"
 +#include "pull.h"
 +#include "pull_rotation.h"
 +#include "gmx_wallcycle.h"
 +#include "mdrun.h"
 +#include "nsgrid.h"
 +#include "shellfc.h"
 +#include "mtop_util.h"
 +#include "gmxfio.h"
 +#include "gmx_ga2la.h"
 +#include "gmx_sort.h"
 +#include "macros.h"
 +#include "nbnxn_search.h"
 +#include "bondf.h"
 +#include "gmx_omp_nthreads.h"
 +
 +#ifdef GMX_LIB_MPI
 +#include <mpi.h>
 +#endif
 +#ifdef GMX_THREAD_MPI
 +#include "tmpi.h"
 +#endif
 +
 +#define DDRANK(dd,rank)    (rank)
 +#define DDMASTERRANK(dd)   (dd->masterrank)
 +
 +typedef struct gmx_domdec_master
 +{
 +    /* The cell boundaries */
 +    real **cell_x;
 +    /* The global charge group division */
 +    int  *ncg;     /* Number of home charge groups for each node */
 +    int  *index;   /* Index of nnodes+1 into cg */
 +    int  *cg;      /* Global charge group index */
 +    int  *nat;     /* Number of home atoms for each node. */
 +    int  *ibuf;    /* Buffer for communication */
 +    rvec *vbuf;    /* Buffer for state scattering and gathering */
 +} gmx_domdec_master_t;
 +
 +typedef struct
 +{
 +    /* The numbers of charge groups to send and receive for each cell
 +     * that requires communication, the last entry contains the total
 +     * number of atoms that needs to be communicated.
 +     */
 +    int nsend[DD_MAXIZONE+2];
 +    int nrecv[DD_MAXIZONE+2];
 +    /* The charge groups to send */
 +    int *index;
 +    int nalloc;
 +    /* The atom range for non-in-place communication */
 +    int cell2at0[DD_MAXIZONE];
 +    int cell2at1[DD_MAXIZONE];
 +} gmx_domdec_ind_t;
 +
 +typedef struct
 +{
 +    int  np;                   /* Number of grid pulses in this dimension */
 +    int  np_dlb;               /* For dlb, for use with edlbAUTO          */
 +    gmx_domdec_ind_t *ind;     /* The indices to communicate, size np     */
 +    int  np_nalloc;
 +    gmx_bool bInPlace;             /* Can we communicate in place?            */
 +} gmx_domdec_comm_dim_t;
 +
 +typedef struct
 +{
 +    gmx_bool *bCellMin;    /* Temp. var.: is this cell size at the limit     */
 +    real *cell_f;      /* State var.: cell boundaries, box relative      */
 +    real *old_cell_f;  /* Temp. var.: old cell size                      */
 +    real *cell_f_max0; /* State var.: max lower boundary, incl neighbors */
 +    real *cell_f_min1; /* State var.: min upper boundary, incl neighbors */
 +    real *bound_min;   /* Temp. var.: lower limit for cell boundary      */
 +    real *bound_max;   /* Temp. var.: upper limit for cell boundary      */
 +    gmx_bool bLimited;     /* State var.: is DLB limited in this dim and row */
 +    real *buf_ncd;     /* Temp. var.                                     */
 +} gmx_domdec_root_t;
 +
 +#define DD_NLOAD_MAX 9
 +
 +/* Here floats are accurate enough, since these variables
 + * only influence the load balancing, not the actual MD results.
 + */
 +typedef struct
 +{
 +    int  nload;
 +    float *load;
 +    float sum;
 +    float max;
 +    float sum_m;
 +    float cvol_min;
 +    float mdf;
 +    float pme;
 +    int   flags;
 +} gmx_domdec_load_t;
 +
 +typedef struct
 +{
 +    int  nsc;
 +    int  ind_gl;
 +    int  ind;
 +} gmx_cgsort_t;
 +
 +typedef struct
 +{
 +    gmx_cgsort_t *sort;
 +    gmx_cgsort_t *sort2;
 +    int  sort_nalloc;
 +    gmx_cgsort_t *sort_new;
 +    int  sort_new_nalloc;
 +    int  *ibuf;
 +    int  ibuf_nalloc;
 +} gmx_domdec_sort_t;
 +
 +typedef struct
 +{
 +    rvec *v;
 +    int  nalloc;
 +} vec_rvec_t;
 +
 +/* This enum determines the order of the coordinates.
 + * ddnatHOME and ddnatZONE should be first and second,
 + * the others can be ordered as wanted.
 + */
 +enum { ddnatHOME, ddnatZONE, ddnatVSITE, ddnatCON, ddnatNR };
 +
 +enum { edlbAUTO, edlbNO, edlbYES, edlbNR };
 +const char *edlb_names[edlbNR] = { "auto", "no", "yes" };
 +
 +typedef struct
 +{
 +    int  dim;      /* The dimension                                          */
 +    gmx_bool dim_match;/* Tells if DD and PME dims match                         */
 +    int  nslab;    /* The number of PME slabs in this dimension              */
 +    real *slb_dim_f; /* Cell sizes for determining the PME comm. with SLB    */
 +    int  *pp_min;  /* The minimum pp node location, size nslab               */
 +    int  *pp_max;  /* The maximum pp node location,size nslab                */
 +    int  maxshift; /* The maximum shift for coordinate redistribution in PME */
 +} gmx_ddpme_t;
 +
 +typedef struct
 +{
 +    real min0;    /* The minimum bottom of this zone                        */
 +    real max1;    /* The maximum top of this zone                           */
 +    real min1;    /* The minimum top of this zone                           */
 +    real mch0;    /* The maximum bottom communicaton height for this zone   */
 +    real mch1;    /* The maximum top communicaton height for this zone      */
 +    real p1_0;    /* The bottom value of the first cell in this zone        */
 +    real p1_1;    /* The top value of the first cell in this zone           */
 +} gmx_ddzone_t;
 +
 +typedef struct
 +{
 +    gmx_domdec_ind_t ind;
 +    int *ibuf;
 +    int ibuf_nalloc;
 +    vec_rvec_t vbuf;
 +    int nsend;
 +    int nat;
 +    int nsend_zone;
 +} dd_comm_setup_work_t;
 +
 +typedef struct gmx_domdec_comm
 +{
 +    /* All arrays are indexed with 0 to dd->ndim (not Cartesian indexing),
 +     * unless stated otherwise.
 +     */
 +
 +    /* The number of decomposition dimensions for PME, 0: no PME */
 +    int  npmedecompdim;
 +    /* The number of nodes doing PME (PP/PME or only PME) */
 +    int  npmenodes;
 +    int  npmenodes_x;
 +    int  npmenodes_y;
 +    /* The communication setup including the PME only nodes */
 +    gmx_bool bCartesianPP_PME;
 +    ivec ntot;
 +    int  cartpmedim;
 +    int  *pmenodes;          /* size npmenodes                         */
 +    int  *ddindex2simnodeid; /* size npmenodes, only with bCartesianPP
 +                              * but with bCartesianPP_PME              */
 +    gmx_ddpme_t ddpme[2];
 +    
 +    /* The DD particle-particle nodes only */
 +    gmx_bool bCartesianPP;
 +    int  *ddindex2ddnodeid; /* size npmenode, only with bCartesianPP_PME */
 +    
 +    /* The global charge groups */
 +    t_block cgs_gl;
 +
 +    /* Should we sort the cgs */
 +    int  nstSortCG;
 +    gmx_domdec_sort_t *sort;
 +    
 +    /* Are there charge groups? */
 +    gmx_bool bCGs;
 +
 +    /* Are there bonded and multi-body interactions between charge groups? */
 +    gmx_bool bInterCGBondeds;
 +    gmx_bool bInterCGMultiBody;
 +
 +    /* Data for the optional bonded interaction atom communication range */
 +    gmx_bool bBondComm;
 +    t_blocka *cglink;
 +    char *bLocalCG;
 +
 +    /* The DLB option */
 +    int  eDLB;
 +    /* Are we actually using DLB? */
 +    gmx_bool bDynLoadBal;
 +
 +    /* Cell sizes for static load balancing, first index cartesian */
 +    real **slb_frac;
 +
 +    /* The width of the communicated boundaries */
 +    real cutoff_mbody;
 +    real cutoff;
 +    /* The minimum cell size (including triclinic correction) */
 +    rvec cellsize_min;
 +    /* For dlb, for use with edlbAUTO */
 +    rvec cellsize_min_dlb;
 +    /* The lower limit for the DD cell size with DLB */
 +    real cellsize_limit;
 +    /* Effectively no NB cut-off limit with DLB for systems without PBC? */
 +    gmx_bool bVacDLBNoLimit;
 +
 +    /* tric_dir is only stored here because dd_get_ns_ranges needs it */
 +    ivec tric_dir;
 +    /* box0 and box_size are required with dim's without pbc and -gcom */
 +    rvec box0;
 +    rvec box_size;
 +    
 +    /* The cell boundaries */
 +    rvec cell_x0;
 +    rvec cell_x1;
 +
 +    /* The old location of the cell boundaries, to check cg displacements */
 +    rvec old_cell_x0;
 +    rvec old_cell_x1;
 +
 +    /* The communication setup and charge group boundaries for the zones */
 +    gmx_domdec_zones_t zones;
 +    
 +    /* The zone limits for DD dimensions 1 and 2 (not 0), determined from
 +     * cell boundaries of neighboring cells for dynamic load balancing.
 +     */
 +    gmx_ddzone_t zone_d1[2];
 +    gmx_ddzone_t zone_d2[2][2];
 +    
 +    /* The coordinate/force communication setup and indices */
 +    gmx_domdec_comm_dim_t cd[DIM];
 +    /* The maximum number of cells to communicate with in one dimension */
 +    int  maxpulse;
 +    
 +    /* Which cg distribution is stored on the master node */
 +    int master_cg_ddp_count;
 +    
 +    /* The number of cg's received from the direct neighbors */
 +    int  zone_ncg1[DD_MAXZONE];
 +    
 +    /* The atom counts, the range for each type t is nat[t-1] <= at < nat[t] */
 +    int  nat[ddnatNR];
 +
 +    /* Array for signalling if atoms have moved to another domain */
 +    int  *moved;
 +    int  moved_nalloc;
 +    
 +    /* Communication buffer for general use */
 +    int  *buf_int;
 +    int  nalloc_int;
 +
 +    /* Communication buffer for general use */
 +    vec_rvec_t vbuf;
 +
 +    /* Temporary storage for thread parallel communication setup */
 +    int nth;
 +    dd_comm_setup_work_t *dth;
 +
 +    /* Communication buffers only used with multiple grid pulses */
 +    int  *buf_int2;
 +    int  nalloc_int2;
 +    vec_rvec_t vbuf2;
 +    
 +    /* Communication buffers for local redistribution */
 +    int  **cggl_flag;
 +    int  cggl_flag_nalloc[DIM*2];
 +    rvec **cgcm_state;
 +    int  cgcm_state_nalloc[DIM*2];
 +    
 +    /* Cell sizes for dynamic load balancing */
 +    gmx_domdec_root_t **root;
 +    real *cell_f_row;
 +    real cell_f0[DIM];
 +    real cell_f1[DIM];
 +    real cell_f_max0[DIM];
 +    real cell_f_min1[DIM];
 +    
 +    /* Stuff for load communication */
 +    gmx_bool bRecordLoad;
 +    gmx_domdec_load_t *load;
 +#ifdef GMX_MPI
 +    MPI_Comm *mpi_comm_load;
 +#endif
 +
 +    /* Maximum DLB scaling per load balancing step in percent */
 +    int dlb_scale_lim;
 +
 +    /* Cycle counters */
 +    float cycl[ddCyclNr];
 +    int   cycl_n[ddCyclNr];
 +    float cycl_max[ddCyclNr];
 +    /* Flop counter (0=no,1=yes,2=with (eFlop-1)*5% noise */
 +    int eFlop;
 +    double flop;
 +    int    flop_n;
 +    /* Have often have did we have load measurements */
 +    int    n_load_have;
 +    /* Have often have we collected the load measurements */
 +    int    n_load_collect;
 +    
 +    /* Statistics */
 +    double sum_nat[ddnatNR-ddnatZONE];
 +    int    ndecomp;
 +    int    nload;
 +    double load_step;
 +    double load_sum;
 +    double load_max;
 +    ivec   load_lim;
 +    double load_mdf;
 +    double load_pme;
 +
 +    /* The last partition step */
 +    gmx_large_int_t partition_step;
 +
 +    /* Debugging */
 +    int  nstDDDump;
 +    int  nstDDDumpGrid;
 +    int  DD_debug;
 +} gmx_domdec_comm_t;
 +
 +/* The size per charge group of the cggl_flag buffer in gmx_domdec_comm_t */
 +#define DD_CGIBS 2
 +
 +/* The flags for the cggl_flag buffer in gmx_domdec_comm_t */
 +#define DD_FLAG_NRCG  65535
 +#define DD_FLAG_FW(d) (1<<(16+(d)*2))
 +#define DD_FLAG_BW(d) (1<<(16+(d)*2+1))
 +
 +/* Zone permutation required to obtain consecutive charge groups
 + * for neighbor searching.
 + */
 +static const int zone_perm[3][4] = { {0,0,0,0},{1,0,0,0},{3,0,1,2} };
 +
 +/* dd_zo and dd_zp3/dd_zp2 are set up such that i zones with non-zero
 + * components see only j zones with that component 0.
 + */
 +
 +/* The DD zone order */
 +static const ivec dd_zo[DD_MAXZONE] =
 +  {{0,0,0},{1,0,0},{1,1,0},{0,1,0},{0,1,1},{0,0,1},{1,0,1},{1,1,1}};
 +
 +/* The 3D setup */
 +#define dd_z3n  8
 +#define dd_zp3n 4
 +static const ivec dd_zp3[dd_zp3n] = {{0,0,8},{1,3,6},{2,5,6},{3,5,7}};
 +
 +/* The 2D setup */
 +#define dd_z2n  4
 +#define dd_zp2n 2
 +static const ivec dd_zp2[dd_zp2n] = {{0,0,4},{1,3,4}};
 +
 +/* The 1D setup */
 +#define dd_z1n  2
 +#define dd_zp1n 1
 +static const ivec dd_zp1[dd_zp1n] = {{0,0,2}};
 +
 +/* Factors used to avoid problems due to rounding issues */
 +#define DD_CELL_MARGIN       1.0001
 +#define DD_CELL_MARGIN2      1.00005
 +/* Factor to account for pressure scaling during nstlist steps */
 +#define DD_PRES_SCALE_MARGIN 1.02
 +
 +/* Allowed performance loss before we DLB or warn */
 +#define DD_PERF_LOSS 0.05
 +
 +#define DD_CELL_F_SIZE(dd,di) ((dd)->nc[(dd)->dim[(di)]]+1+(di)*2+1+(di))
 +
 +/* Use separate MPI send and receive commands
 + * when nnodes <= GMX_DD_NNODES_SENDRECV.
 + * This saves memory (and some copying for small nnodes).
 + * For high parallelization scatter and gather calls are used.
 + */
 +#define GMX_DD_NNODES_SENDRECV 4
 +
 +
 +/*
 +#define dd_index(n,i) ((((i)[ZZ]*(n)[YY] + (i)[YY])*(n)[XX]) + (i)[XX])
 +
 +static void index2xyz(ivec nc,int ind,ivec xyz)
 +{
 +  xyz[XX] = ind % nc[XX];
 +  xyz[YY] = (ind / nc[XX]) % nc[YY];
 +  xyz[ZZ] = ind / (nc[YY]*nc[XX]);
 +}
 +*/
 +
 +/* This order is required to minimize the coordinate communication in PME
 + * which uses decomposition in the x direction.
 + */
 +#define dd_index(n,i) ((((i)[XX]*(n)[YY] + (i)[YY])*(n)[ZZ]) + (i)[ZZ])
 +
 +static void ddindex2xyz(ivec nc,int ind,ivec xyz)
 +{
 +    xyz[XX] = ind / (nc[YY]*nc[ZZ]);
 +    xyz[YY] = (ind / nc[ZZ]) % nc[YY];
 +    xyz[ZZ] = ind % nc[ZZ];
 +}
 +
 +static int ddcoord2ddnodeid(gmx_domdec_t *dd,ivec c)
 +{
 +    int ddindex;
 +    int ddnodeid=-1;
 +    
 +    ddindex = dd_index(dd->nc,c);
 +    if (dd->comm->bCartesianPP_PME)
 +    {
 +        ddnodeid = dd->comm->ddindex2ddnodeid[ddindex];
 +    }
 +    else if (dd->comm->bCartesianPP)
 +    {
 +#ifdef GMX_MPI
 +        MPI_Cart_rank(dd->mpi_comm_all,c,&ddnodeid);
 +#endif
 +    }
 +    else
 +    {
 +        ddnodeid = ddindex;
 +    }
 +    
 +    return ddnodeid;
 +}
 +
 +static gmx_bool dynamic_dd_box(gmx_ddbox_t *ddbox,t_inputrec *ir)
 +{
 +    return (ddbox->nboundeddim < DIM || DYNAMIC_BOX(*ir));
 +}
 +
 +int ddglatnr(gmx_domdec_t *dd,int i)
 +{
 +    int atnr;
 +    
 +    if (dd == NULL)
 +    {
 +        atnr = i + 1;
 +    }
 +    else
 +    {
 +        if (i >= dd->comm->nat[ddnatNR-1])
 +        {
 +            gmx_fatal(FARGS,"glatnr called with %d, which is larger than the local number of atoms (%d)",i,dd->comm->nat[ddnatNR-1]);
 +        }
 +        atnr = dd->gatindex[i] + 1;
 +    }
 +    
 +    return atnr;
 +}
 +
 +t_block *dd_charge_groups_global(gmx_domdec_t *dd)
 +{
 +    return &dd->comm->cgs_gl;
 +}
 +
 +static void vec_rvec_init(vec_rvec_t *v)
 +{
 +    v->nalloc = 0;
 +    v->v      = NULL;
 +}
 +
 +static void vec_rvec_check_alloc(vec_rvec_t *v,int n)
 +{
 +    if (n > v->nalloc)
 +    {
 +        v->nalloc = over_alloc_dd(n);
 +        srenew(v->v,v->nalloc);
 +    }
 +}
 +
 +void dd_store_state(gmx_domdec_t *dd,t_state *state)
 +{
 +    int i;
 +    
 +    if (state->ddp_count != dd->ddp_count)
 +    {
 +        gmx_incons("The state does not the domain decomposition state");
 +    }
 +    
 +    state->ncg_gl = dd->ncg_home;
 +    if (state->ncg_gl > state->cg_gl_nalloc)
 +    {
 +        state->cg_gl_nalloc = over_alloc_dd(state->ncg_gl);
 +        srenew(state->cg_gl,state->cg_gl_nalloc);
 +    }
 +    for(i=0; i<state->ncg_gl; i++)
 +    {
 +        state->cg_gl[i] = dd->index_gl[i];
 +    }
 +    
 +    state->ddp_count_cg_gl = dd->ddp_count;
 +}
 +
 +gmx_domdec_zones_t *domdec_zones(gmx_domdec_t *dd)
 +{
 +    return &dd->comm->zones;
 +}
 +
 +/* Returns, for i-charge-group icg, the range [*jcg0,*jcg1) of j charge
 + * groups to pair with during neighborsearching, plus the allowed shift
 + * range per DD dimension in shift0/shift1 (in cells). In the home zone
 + * the j range starts at icg itself to avoid double counting pairs. */
 +void dd_get_ns_ranges(gmx_domdec_t *dd,int icg,
 +                      int *jcg0,int *jcg1,ivec shift0,ivec shift1)
 +{
 +    gmx_domdec_zones_t *zones;
 +    int izone,d,dim;
 +
 +    zones = &dd->comm->zones;
 +
 +    /* Locate the i-zone that contains icg (zones are cg-contiguous) */
 +    izone = 0;
 +    while (icg >= zones->izone[izone].cg1)
 +    {
 +        izone++;
 +    }
 +
 +    if (izone == 0)
 +    {
 +        /* Home zone: start the j range at icg to count each pair once */
 +        *jcg0 = icg;
 +    }
 +    else if (izone < zones->nizone)
 +    {
 +        *jcg0 = zones->izone[izone].jcg0;
 +    }
 +    else
 +    {
 +        gmx_fatal(FARGS,"DD icg %d out of range: izone (%d) >= nizone (%d)",
 +                  icg,izone,zones->nizone);
 +    }
 +
 +    *jcg1 = zones->izone[izone].jcg1;
 +
 +    for(d=0; d<dd->ndim; d++)
 +    {
 +        dim = dd->dim[d];
 +        shift0[dim] = zones->izone[izone].shift0[dim];
 +        shift1[dim] = zones->izone[izone].shift1[dim];
 +        /* With triclinic cells or staggered (DLB) grids the cell
 +         * boundaries are not planes, so widen the shift range */
 +        if (dd->comm->tric_dir[dim] || (dd->bGridJump && d > 0))
 +        {
 +            /* A conservative approach, this can be optimized */
 +            shift0[dim] -= 1;
 +            shift1[dim] += 1;
 +        }
 +    }
 +}
 +
 +int dd_natoms_vsite(gmx_domdec_t *dd)
 +{
 +    return dd->comm->nat[ddnatVSITE];
 +}
 +
 +void dd_get_constraint_range(gmx_domdec_t *dd,int *at_start,int *at_end)
 +{
 +    *at_start = dd->comm->nat[ddnatCON-1];
 +    *at_end   = dd->comm->nat[ddnatCON];
 +}
 +
 +/* Communicates the coordinates x of charge groups to the neighboring
 + * DD cells, one pulse at a time per decomposition dimension, appending
 + * the received coordinates after the home atoms (or scattering them via
 + * comm->vbuf2 when the receive is not in place). Cells at the lower box
 + * edge apply the periodic shift; with screw PBC in x, y and z are
 + * additionally mirrored (the matching force fix-up is done in calc_vir). */
 +void dd_move_x(gmx_domdec_t *dd,matrix box,rvec x[])
 +{
 +    int  nzone,nat_tot,n,d,p,i,j,at0,at1,zone;
 +    int  *index,*cgindex;
 +    gmx_domdec_comm_t *comm;
 +    gmx_domdec_comm_dim_t *cd;
 +    gmx_domdec_ind_t *ind;
 +    rvec shift={0,0,0},*buf,*rbuf;
 +    gmx_bool bPBC,bScrew;
 +
 +    comm = dd->comm;
 +
 +    cgindex = dd->cgindex;
 +
 +    buf = comm->vbuf.v;
 +
 +    /* The number of zones doubles with each communicated dimension */
 +    nzone = 1;
 +    nat_tot = dd->nat_home;
 +    for(d=0; d<dd->ndim; d++)
 +    {
 +        /* Only the cells at the lower edge send over the periodic boundary */
 +        bPBC   = (dd->ci[dd->dim[d]] == 0);
 +        bScrew = (bPBC && dd->bScrewPBC && dd->dim[d] == XX);
 +        if (bPBC)
 +        {
 +            copy_rvec(box[dd->dim[d]],shift);
 +        }
 +        cd = &comm->cd[d];
 +        for(p=0; p<cd->np; p++)
 +        {
 +            ind = &cd->ind[p];
 +            index = ind->index;
 +            /* Pack the coordinates to send into buf */
 +            n = 0;
 +            if (!bPBC)
 +            {
 +                for(i=0; i<ind->nsend[nzone]; i++)
 +                {
 +                    at0 = cgindex[index[i]];
 +                    at1 = cgindex[index[i]+1];
 +                    for(j=at0; j<at1; j++)
 +                    {
 +                        copy_rvec(x[j],buf[n]);
 +                        n++;
 +                    }
 +                }
 +            }
 +            else if (!bScrew)
 +            {
 +                for(i=0; i<ind->nsend[nzone]; i++)
 +                {
 +                    at0 = cgindex[index[i]];
 +                    at1 = cgindex[index[i]+1];
 +                    for(j=at0; j<at1; j++)
 +                    {
 +                        /* We need to shift the coordinates */
 +                        rvec_add(x[j],shift,buf[n]);
 +                        n++;
 +                    }
 +                }
 +            }
 +            else
 +            {
 +                for(i=0; i<ind->nsend[nzone]; i++)
 +                {
 +                    at0 = cgindex[index[i]];
 +                    at1 = cgindex[index[i]+1];
 +                    for(j=at0; j<at1; j++)
 +                    {
 +                        /* Shift x */
 +                        buf[n][XX] = x[j][XX] + shift[XX];
 +                        /* Rotate y and z.
 +                         * This operation requires a special shift force
 +                         * treatment, which is performed in calc_vir.
 +                         */
 +                        buf[n][YY] = box[YY][YY] - x[j][YY];
 +                        buf[n][ZZ] = box[ZZ][ZZ] - x[j][ZZ];
 +                        n++;
 +                    }
 +                }
 +            }
 +
 +            /* Receive directly behind the atoms present so far, or into
 +             * a temporary buffer when the data is not contiguous */
 +            if (cd->bInPlace)
 +            {
 +                rbuf = x + nat_tot;
 +            }
 +            else
 +            {
 +                rbuf = comm->vbuf2.v;
 +            }
 +            /* Send and receive the coordinates */
 +            dd_sendrecv_rvec(dd, d, dddirBackward,
 +                             buf,  ind->nsend[nzone+1],
 +                             rbuf, ind->nrecv[nzone+1]);
 +            if (!cd->bInPlace)
 +            {
 +                /* Scatter the received coordinates to their zone ranges */
 +                j = 0;
 +                for(zone=0; zone<nzone; zone++)
 +                {
 +                    for(i=ind->cell2at0[zone]; i<ind->cell2at1[zone]; i++)
 +                    {
 +                        copy_rvec(rbuf[j],x[i]);
 +                        j++;
 +                    }
 +                }
 +            }
 +            nat_tot += ind->nrecv[nzone+1];
 +        }
 +        nzone += nzone;
 +    }
 +}
 +
 +/* Communicate the forces on the halo atoms back to their home domains
 + * and add them to f[] there; the exact reverse of dd_move_x: dimensions
 + * and pulses are traversed in the opposite order. When fshift is
 + * non-NULL, forces that crossed a periodic boundary are also added to
 + * the corresponding shift force for the virial; with screw PBC the
 + * y and z force components are rotated back.
 + */
 +void dd_move_f(gmx_domdec_t *dd,rvec f[],rvec *fshift)
 +{
 +    int  nzone,nat_tot,n,d,p,i,j,at0,at1,zone;
 +    int  *index,*cgindex;
 +    gmx_domdec_comm_t *comm;
 +    gmx_domdec_comm_dim_t *cd;
 +    gmx_domdec_ind_t *ind;
 +    rvec *buf,*sbuf;
 +    ivec vis;
 +    int  is;
 +    gmx_bool bPBC,bScrew;
 +    
 +    comm = dd->comm;
 +    
 +    cgindex = dd->cgindex;
 +
 +    buf = comm->vbuf.v;
 +
 +    n = 0;
 +    nzone = comm->zones.n/2;
 +    nat_tot = dd->nat_tot;
 +    for(d=dd->ndim-1; d>=0; d--)
 +    {
 +        bPBC   = (dd->ci[dd->dim[d]] == 0);
 +        bScrew = (bPBC && dd->bScrewPBC && dd->dim[d] == XX);
 +        if (fshift == NULL && !bScrew)
 +        {
 +            /* No virial requested: skip the shift-force bookkeeping */
 +            bPBC = FALSE;
 +        }
 +        /* Determine which shift vector we need */
 +        clear_ivec(vis);
 +        vis[dd->dim[d]] = 1;
 +        is = IVEC2IS(vis);
 +        
 +        cd = &comm->cd[d];
 +        for(p=cd->np-1; p>=0; p--) {
 +            ind = &cd->ind[p];
 +            /* The forces to send back sit at the end of f; walk backward */
 +            nat_tot -= ind->nrecv[nzone+1];
 +            if (cd->bInPlace)
 +            {
 +                sbuf = f + nat_tot;
 +            }
 +            else
 +            {
 +                /* Gather the forces into a contiguous send buffer */
 +                sbuf = comm->vbuf2.v;
 +                j = 0;
 +                for(zone=0; zone<nzone; zone++)
 +                {
 +                    for(i=ind->cell2at0[zone]; i<ind->cell2at1[zone]; i++)
 +                    {
 +                        copy_rvec(f[i],sbuf[j]);
 +                        j++;
 +                    }
 +                }
 +            }
 +            /* Communicate the forces */
 +            dd_sendrecv_rvec(dd, d, dddirForward,
 +                             sbuf, ind->nrecv[nzone+1],
 +                             buf,  ind->nsend[nzone+1]);
 +            index = ind->index;
 +            /* Add the received forces */
 +            n = 0;
 +            if (!bPBC)
 +            {
 +                for(i=0; i<ind->nsend[nzone]; i++)
 +                {
 +                    at0 = cgindex[index[i]];
 +                    at1 = cgindex[index[i]+1];
 +                    for(j=at0; j<at1; j++)
 +                    {
 +                        rvec_inc(f[j],buf[n]);
 +                        n++;
 +                    }
 +                } 
 +            }
 +            else if (!bScrew)
 +            {
 +                for(i=0; i<ind->nsend[nzone]; i++)
 +                {
 +                    at0 = cgindex[index[i]];
 +                    at1 = cgindex[index[i]+1];
 +                    for(j=at0; j<at1; j++)
 +                    {
 +                        rvec_inc(f[j],buf[n]);
 +                        /* Add this force to the shift force */
 +                        rvec_inc(fshift[is],buf[n]);
 +                        n++;
 +                    }
 +                }
 +            }
 +            else
 +            {
 +                for(i=0; i<ind->nsend[nzone]; i++)
 +                {
 +                    at0 = cgindex[index[i]];
 +                    at1 = cgindex[index[i]+1];
 +                    for(j=at0; j<at1; j++)
 +                    {
 +                        /* Rotate the force */
 +                        f[j][XX] += buf[n][XX];
 +                        f[j][YY] -= buf[n][YY];
 +                        f[j][ZZ] -= buf[n][ZZ];
 +                        if (fshift)
 +                        {
 +                            /* Add this force to the shift force */
 +                            rvec_inc(fshift[is],buf[n]);
 +                        }
 +                        n++;
 +                    }
 +                }
 +            }
 +        }
 +        /* Going backward, the zone count halves per dimension */
 +        nzone /= 2;
 +    }
 +}
 +
 +/* Communicate a per-atom real value v[] from the home atoms to the halo
 + * atoms of the neighboring domains; the scalar analogue of dd_move_x
 + * (no periodic shift is needed for scalar data).
 + */
 +void dd_atom_spread_real(gmx_domdec_t *dd,real v[])
 +{
 +    int  nzone,nat_tot,n,d,p,i,j,at0,at1,zone;
 +    int  *index,*cgindex;
 +    gmx_domdec_comm_t *comm;
 +    gmx_domdec_comm_dim_t *cd;
 +    gmx_domdec_ind_t *ind;
 +    real *buf,*rbuf;
 +    
 +    comm = dd->comm;
 +    
 +    cgindex = dd->cgindex;
 +    
 +    /* Reuse the rvec communication buffer as a flat real buffer */
 +    buf = &comm->vbuf.v[0][0];
 +
 +    nzone = 1;
 +    nat_tot = dd->nat_home;
 +    for(d=0; d<dd->ndim; d++)
 +    {
 +        cd = &comm->cd[d];
 +        for(p=0; p<cd->np; p++)
 +        {
 +            ind = &cd->ind[p];
 +            index = ind->index;
 +            n = 0;
 +            /* Pack the values of the charge groups to send */
 +            for(i=0; i<ind->nsend[nzone]; i++)
 +            {
 +                at0 = cgindex[index[i]];
 +                at1 = cgindex[index[i]+1];
 +                for(j=at0; j<at1; j++)
 +                {
 +                    buf[n] = v[j];
 +                    n++;
 +                }
 +            }
 +            
 +            if (cd->bInPlace)
 +            {
 +                /* Receive directly into the halo part of v */
 +                rbuf = v + nat_tot;
 +            }
 +            else
 +            {
 +                rbuf = &comm->vbuf2.v[0][0];
 +            }
 +            /* Send and receive the coordinates */
 +            dd_sendrecv_real(dd, d, dddirBackward,
 +                             buf,  ind->nsend[nzone+1],
 +                             rbuf, ind->nrecv[nzone+1]);
 +            if (!cd->bInPlace)
 +            {
 +                /* Scatter the received values to their zone slots */
 +                j = 0;
 +                for(zone=0; zone<nzone; zone++)
 +                {
 +                    for(i=ind->cell2at0[zone]; i<ind->cell2at1[zone]; i++)
 +                    {
 +                        v[i] = rbuf[j];
 +                        j++;
 +                    }
 +                }
 +            }
 +            nat_tot += ind->nrecv[nzone+1];
 +        }
 +        nzone += nzone;
 +    }
 +}
 +
 +/* Communicate per-atom real values v[] of the halo atoms back to their
 + * home domains and add them there; the scalar analogue of dd_move_f
 + * and the exact reverse of dd_atom_spread_real.
 + */
 +void dd_atom_sum_real(gmx_domdec_t *dd,real v[])
 +{
 +    int  nzone,nat_tot,n,d,p,i,j,at0,at1,zone;
 +    int  *index,*cgindex;
 +    gmx_domdec_comm_t *comm;
 +    gmx_domdec_comm_dim_t *cd;
 +    gmx_domdec_ind_t *ind;
 +    real *buf,*sbuf;
 +    
 +    comm = dd->comm;
 +    
 +    cgindex = dd->cgindex;
 +
 +    /* Reuse the rvec communication buffer as a flat real buffer */
 +    buf = &comm->vbuf.v[0][0];
 +
 +    n = 0;
 +    nzone = comm->zones.n/2;
 +    nat_tot = dd->nat_tot;
 +    for(d=dd->ndim-1; d>=0; d--)
 +    {
 +        cd = &comm->cd[d];
 +        for(p=cd->np-1; p>=0; p--) {
 +            ind = &cd->ind[p];
 +            /* The values to send back sit at the end of v; walk backward */
 +            nat_tot -= ind->nrecv[nzone+1];
 +            if (cd->bInPlace)
 +            {
 +                sbuf = v + nat_tot;
 +            }
 +            else
 +            {
 +                /* Gather the values into a contiguous send buffer */
 +                sbuf = &comm->vbuf2.v[0][0];
 +                j = 0;
 +                for(zone=0; zone<nzone; zone++)
 +                {
 +                    for(i=ind->cell2at0[zone]; i<ind->cell2at1[zone]; i++)
 +                    {
 +                        sbuf[j] = v[i];
 +                        j++;
 +                    }
 +                }
 +            }
 +            /* Communicate the forces */
 +            dd_sendrecv_real(dd, d, dddirForward,
 +                             sbuf, ind->nrecv[nzone+1],
 +                             buf,  ind->nsend[nzone+1]);
 +            index = ind->index;
 +            /* Add the received forces */
 +            n = 0;
 +            for(i=0; i<ind->nsend[nzone]; i++)
 +            {
 +                at0 = cgindex[index[i]];
 +                at1 = cgindex[index[i]+1];
 +                for(j=at0; j<at1; j++)
 +                {
 +                    v[j] += buf[n];
 +                    n++;
 +                }
 +            } 
 +        }
 +        nzone /= 2;
 +    }
 +}
 +
 +static void print_ddzone(FILE *fp,int d,int i,int j,gmx_ddzone_t *zone)
 +{
 +    fprintf(fp,"zone d0 %d d1 %d d2 %d  min0 %6.3f max1 %6.3f mch0 %6.3f mch1 %6.3f p1_0 %6.3f p1_1 %6.3f\n",
 +            d,i,j,
 +            zone->min0,zone->max1,
 +            zone->mch0,zone->mch0,
 +            zone->p1_0,zone->p1_1);
 +}
 +
 +
 +#define DDZONECOMM_MAXZONE  5
 +#define DDZONECOMM_BUFSIZE  3
 +
 +static void dd_sendrecv_ddzone(const gmx_domdec_t *dd,
 +                               int ddimind,int direction,
 +                               gmx_ddzone_t *buf_s,int n_s,
 +                               gmx_ddzone_t *buf_r,int n_r)
 +{
 +#define ZBS  DDZONECOMM_BUFSIZE
 +    rvec vbuf_s[DDZONECOMM_MAXZONE*ZBS];
 +    rvec vbuf_r[DDZONECOMM_MAXZONE*ZBS];
 +    int i;
 +
 +    for(i=0; i<n_s; i++)
 +    {
 +        vbuf_s[i*ZBS  ][0] = buf_s[i].min0;
 +        vbuf_s[i*ZBS  ][1] = buf_s[i].max1;
 +        vbuf_s[i*ZBS  ][2] = buf_s[i].min1;
 +        vbuf_s[i*ZBS+1][0] = buf_s[i].mch0;
 +        vbuf_s[i*ZBS+1][1] = buf_s[i].mch1;
 +        vbuf_s[i*ZBS+1][2] = 0;
 +        vbuf_s[i*ZBS+2][0] = buf_s[i].p1_0;
 +        vbuf_s[i*ZBS+2][1] = buf_s[i].p1_1;
 +        vbuf_s[i*ZBS+2][2] = 0;
 +    }
 +
 +    dd_sendrecv_rvec(dd, ddimind, direction,
 +                     vbuf_s, n_s*ZBS,
 +                     vbuf_r, n_r*ZBS);
 +
 +    for(i=0; i<n_r; i++)
 +    {
 +        buf_r[i].min0 = vbuf_r[i*ZBS  ][0];
 +        buf_r[i].max1 = vbuf_r[i*ZBS  ][1];
 +        buf_r[i].min1 = vbuf_r[i*ZBS  ][2];
 +        buf_r[i].mch0 = vbuf_r[i*ZBS+1][0];
 +        buf_r[i].mch1 = vbuf_r[i*ZBS+1][1];
 +        buf_r[i].p1_0 = vbuf_r[i*ZBS+2][0];
 +        buf_r[i].p1_1 = vbuf_r[i*ZBS+2][1];
 +    }
 +
 +#undef ZBS
 +}
 +
 +/* With dynamic load balancing the cells are staggered, so the
 + * neighbor-search grid extent of this cell can depend on the cell
 + * boundaries of neighboring cells. This routine communicates the zone
 + * boundary extremes between neighbors and widens cell_ns_x0/cell_ns_x1
 + * accordingly. It also stores the cell-fraction extremes in
 + * comm->cell_f_max0/cell_f_min1 for the load balancing.
 + */
 +static void dd_move_cellx(gmx_domdec_t *dd,gmx_ddbox_t *ddbox,
 +                          rvec cell_ns_x0,rvec cell_ns_x1)
 +{
 +    int  d,d1,dim,dim1,pos,buf_size,i,j,k,p,npulse,npulse_min;
 +    gmx_ddzone_t *zp;
 +    gmx_ddzone_t buf_s[DDZONECOMM_MAXZONE];
 +    gmx_ddzone_t buf_r[DDZONECOMM_MAXZONE];
 +    gmx_ddzone_t buf_e[DDZONECOMM_MAXZONE];
 +    rvec extr_s[2],extr_r[2];
 +    rvec dh;
 +    real dist_d,c=0,det;
 +    gmx_domdec_comm_t *comm;
 +    gmx_bool bPBC,bUse;
 +
 +    comm = dd->comm;
 +
 +    /* Initialize the local zone limits from our own cell boundaries */
 +    for(d=1; d<dd->ndim; d++)
 +    {
 +        dim = dd->dim[d];
 +        zp = (d == 1) ? &comm->zone_d1[0] : &comm->zone_d2[0][0];
 +        zp->min0 = cell_ns_x0[dim];
 +        zp->max1 = cell_ns_x1[dim];
 +        zp->min1 = cell_ns_x1[dim];
 +        zp->mch0 = cell_ns_x0[dim];
 +        zp->mch1 = cell_ns_x1[dim];
 +        zp->p1_0 = cell_ns_x0[dim];
 +        zp->p1_1 = cell_ns_x1[dim];
 +    }
 +    
 +    /* Communicate over the decomposition dimensions, innermost first */
 +    for(d=dd->ndim-2; d>=0; d--)
 +    {
 +        dim  = dd->dim[d];
 +        bPBC = (dim < ddbox->npbcdim);
 +
 +        /* Use an rvec to store two reals */
 +        extr_s[d][0] = comm->cell_f0[d+1];
 +        extr_s[d][1] = comm->cell_f1[d+1];
 +        extr_s[d][2] = comm->cell_f1[d+1];
 +
 +        pos = 0;
 +        /* Store the extremes in the backward sending buffer,
 +         * so the get updated separately from the forward communication.
 +         */
 +        for(d1=d; d1<dd->ndim-1; d1++)
 +        {
 +            /* We invert the order to be able to use the same loop for buf_e */
 +            buf_s[pos].min0 = extr_s[d1][1];
 +            buf_s[pos].max1 = extr_s[d1][0];
 +            buf_s[pos].min1 = extr_s[d1][2];
 +            buf_s[pos].mch0 = 0;
 +            buf_s[pos].mch1 = 0;
 +            /* Store the cell corner of the dimension we communicate along */
 +            buf_s[pos].p1_0 = comm->cell_x0[dim];
 +            buf_s[pos].p1_1 = 0;
 +            pos++;
 +        }
 +
 +        buf_s[pos] = (dd->ndim == 2) ? comm->zone_d1[0] : comm->zone_d2[0][0];
 +        pos++;
 +
 +        if (dd->ndim == 3 && d == 0)
 +        {
 +            buf_s[pos] = comm->zone_d2[0][1];
 +            pos++;
 +            buf_s[pos] = comm->zone_d1[0];
 +            pos++;
 +        }
 +
 +        /* We only need to communicate the extremes
 +         * in the forward direction
 +         */
 +        npulse = comm->cd[d].np;
 +        if (bPBC)
 +        {
 +            /* Take the minimum to avoid double communication */
 +            npulse_min = min(npulse,dd->nc[dim]-1-npulse);
 +        }
 +        else
 +        {
 +            /* Without PBC we should really not communicate over
 +             * the boundaries, but implementing that complicates
 +             * the communication setup and therefore we simply
 +             * do all communication, but ignore some data.
 +             */
 +            npulse_min = npulse;
 +        }
 +        for(p=0; p<npulse_min; p++)
 +        {
 +            /* Communicate the extremes forward */
 +            bUse = (bPBC || dd->ci[dim] > 0);
 +
 +            dd_sendrecv_rvec(dd, d, dddirForward,
 +                             extr_s+d, dd->ndim-d-1,
 +                             extr_r+d, dd->ndim-d-1);
 +
 +            if (bUse)
 +            {
 +                /* Merge the received extremes into our own */
 +                for(d1=d; d1<dd->ndim-1; d1++)
 +                {
 +                    extr_s[d1][0] = max(extr_s[d1][0],extr_r[d1][0]);
 +                    extr_s[d1][1] = min(extr_s[d1][1],extr_r[d1][1]);
 +                    extr_s[d1][2] = min(extr_s[d1][2],extr_r[d1][2]);
 +                }
 +            }
 +        }
 +
 +        buf_size = pos;
 +        for(p=0; p<npulse; p++)
 +        {
 +            /* Communicate all the zone information backward */
 +            bUse = (bPBC || dd->ci[dim] < dd->nc[dim] - 1);
 +
 +            dd_sendrecv_ddzone(dd, d, dddirBackward,
 +                               buf_s, buf_size,
 +                               buf_r, buf_size);
 +
 +            clear_rvec(dh);
 +            if (p > 0)
 +            {
 +                for(d1=d+1; d1<dd->ndim; d1++)
 +                {
 +                    /* Determine the decrease of maximum required
 +                     * communication height along d1 due to the distance along d,
 +                     * this avoids a lot of useless atom communication.
 +                     */
 +                    dist_d = comm->cell_x1[dim] - buf_r[0].p1_0;
 +
 +                    if (ddbox->tric_dir[dim])
 +                    {
 +                        /* c is the off-diagonal coupling between the cell planes
 +                         * along directions d and d1.
 +                         */
 +                        c = ddbox->v[dim][dd->dim[d1]][dim];
 +                    }
 +                    else
 +                    {
 +                        c = 0;
 +                    }
 +                    det = (1 + c*c)*comm->cutoff*comm->cutoff - dist_d*dist_d;
 +                    if (det > 0)
 +                    {
 +                        dh[d1] = comm->cutoff - (c*dist_d + sqrt(det))/(1 + c*c);
 +                    }
 +                    else
 +                    {
 +                        /* A negative value signals out of range */
 +                        dh[d1] = -1;
 +                    }
 +                }
 +            }
 +
 +            /* Accumulate the extremes over all pulses */
 +            for(i=0; i<buf_size; i++)
 +            {
 +                if (p == 0)
 +                {
 +                    buf_e[i] = buf_r[i];
 +                }
 +                else
 +                {
 +                    if (bUse)
 +                    {
 +                        buf_e[i].min0 = min(buf_e[i].min0,buf_r[i].min0);
 +                        buf_e[i].max1 = max(buf_e[i].max1,buf_r[i].max1);
 +                        buf_e[i].min1 = min(buf_e[i].min1,buf_r[i].min1);
 +                    }
 +
 +                    /* The last entry communicated in 3D along d=0 belongs
 +                     * to dimension index 1, the others to d+1.
 +                     */
 +                    if (dd->ndim == 3 && d == 0 && i == buf_size - 1)
 +                    {
 +                        d1 = 1;
 +                    }
 +                    else
 +                    {
 +                        d1 = d + 1;
 +                    }
 +                    if (bUse && dh[d1] >= 0)
 +                    {
 +                        buf_e[i].mch0 = max(buf_e[i].mch0,buf_r[i].mch0-dh[d1]);
 +                        buf_e[i].mch1 = max(buf_e[i].mch1,buf_r[i].mch1-dh[d1]);
 +                    }
 +                }
 +                /* Copy the received buffer to the send buffer,
 +                 * to pass the data through with the next pulse.
 +                 */
 +                buf_s[i] = buf_r[i];
 +            }
 +            if (((bPBC || dd->ci[dim]+npulse < dd->nc[dim]) && p == npulse-1) ||
 +                (!bPBC && dd->ci[dim]+1+p == dd->nc[dim]-1))
 +            {
 +                /* Store the extremes */ 
 +                pos = 0;
 +
 +                for(d1=d; d1<dd->ndim-1; d1++)
 +                {
 +                    extr_s[d1][1] = min(extr_s[d1][1],buf_e[pos].min0);
 +                    extr_s[d1][0] = max(extr_s[d1][0],buf_e[pos].max1);
 +                    extr_s[d1][2] = min(extr_s[d1][2],buf_e[pos].min1);
 +                    pos++;
 +                }
 +
 +                if (d == 1 || (d == 0 && dd->ndim == 3))
 +                {
 +                    for(i=d; i<2; i++)
 +                    {
 +                        comm->zone_d2[1-d][i] = buf_e[pos];
 +                        pos++;
 +                    }
 +                }
 +                if (d == 0)
 +                {
 +                    comm->zone_d1[1] = buf_e[pos];
 +                    pos++;
 +                }
 +            }
 +        }
 +    }
 +    
 +    /* Widen the neighbor-search extent with the communicated zone limits */
 +    if (dd->ndim >= 2)
 +    {
 +        dim = dd->dim[1];
 +        for(i=0; i<2; i++)
 +        {
 +            if (debug)
 +            {
 +                print_ddzone(debug,1,i,0,&comm->zone_d1[i]);
 +            }
 +            cell_ns_x0[dim] = min(cell_ns_x0[dim],comm->zone_d1[i].min0);
 +            cell_ns_x1[dim] = max(cell_ns_x1[dim],comm->zone_d1[i].max1);
 +        }
 +    }
 +    if (dd->ndim >= 3)
 +    {
 +        dim = dd->dim[2];
 +        for(i=0; i<2; i++)
 +        {
 +            for(j=0; j<2; j++)
 +            {
 +                if (debug)
 +                {
 +                    print_ddzone(debug,2,i,j,&comm->zone_d2[i][j]);
 +                }
 +                cell_ns_x0[dim] = min(cell_ns_x0[dim],comm->zone_d2[i][j].min0);
 +                cell_ns_x1[dim] = max(cell_ns_x1[dim],comm->zone_d2[i][j].max1);
 +            }
 +        }
 +    }
 +    /* Store the cell-fraction extremes for the dynamic load balancing */
 +    for(d=1; d<dd->ndim; d++)
 +    {
 +        comm->cell_f_max0[d] = extr_s[d-1][0];
 +        comm->cell_f_min1[d] = extr_s[d-1][1];
 +        if (debug)
 +        {
 +            fprintf(debug,"Cell fraction d %d, max0 %f, min1 %f\n",
 +                    d,comm->cell_f_max0[d],comm->cell_f_min1[d]);
 +        }
 +    }
 +}
 +
 +static void dd_collect_cg(gmx_domdec_t *dd,
 +                          t_state *state_local)
 +{
 +    gmx_domdec_master_t *ma=NULL;
 +    int buf2[2],*ibuf,i,ncg_home=0,*cg=NULL,nat_home=0;
 +    t_block *cgs_gl;
 +
 +    if (state_local->ddp_count == dd->comm->master_cg_ddp_count)
 +    {
 +        /* The master has the correct distribution */
 +        return;
 +    }
 +    
 +    if (state_local->ddp_count == dd->ddp_count)
 +    {
 +        ncg_home = dd->ncg_home;
 +        cg       = dd->index_gl;
 +        nat_home = dd->nat_home;
 +    } 
 +    else if (state_local->ddp_count_cg_gl == state_local->ddp_count)
 +    {
 +        cgs_gl = &dd->comm->cgs_gl;
 +
 +        ncg_home = state_local->ncg_gl;
 +        cg       = state_local->cg_gl;
 +        nat_home = 0;
 +        for(i=0; i<ncg_home; i++)
 +        {
 +            nat_home += cgs_gl->index[cg[i]+1] - cgs_gl->index[cg[i]];
 +        }
 +    }
 +    else
 +    {
 +        gmx_incons("Attempted to collect a vector for a state for which the charge group distribution is unknown");
 +    }
 +    
 +    buf2[0] = dd->ncg_home;
 +    buf2[1] = dd->nat_home;
 +    if (DDMASTER(dd))
 +    {
 +        ma = dd->ma;
 +        ibuf = ma->ibuf;
 +    }
 +    else
 +    {
 +        ibuf = NULL;
 +    }
 +    /* Collect the charge group and atom counts on the master */
 +    dd_gather(dd,2*sizeof(int),buf2,ibuf);
 +    
 +    if (DDMASTER(dd))
 +    {
 +        ma->index[0] = 0;
 +        for(i=0; i<dd->nnodes; i++)
 +        {
 +            ma->ncg[i] = ma->ibuf[2*i];
 +            ma->nat[i] = ma->ibuf[2*i+1];
 +            ma->index[i+1] = ma->index[i] + ma->ncg[i];
 +            
 +        }
 +        /* Make byte counts and indices */
 +        for(i=0; i<dd->nnodes; i++)
 +        {
 +            ma->ibuf[i] = ma->ncg[i]*sizeof(int);
 +            ma->ibuf[dd->nnodes+i] = ma->index[i]*sizeof(int);
 +        }
 +        if (debug)
 +        {
 +            fprintf(debug,"Initial charge group distribution: ");
 +            for(i=0; i<dd->nnodes; i++)
 +                fprintf(debug," %d",ma->ncg[i]);
 +            fprintf(debug,"\n");
 +        }
 +    }
 +    
 +    /* Collect the charge group indices on the master */
 +    dd_gatherv(dd,
 +               dd->ncg_home*sizeof(int),dd->index_gl,
 +               DDMASTER(dd) ? ma->ibuf : NULL,
 +               DDMASTER(dd) ? ma->ibuf+dd->nnodes : NULL,
 +               DDMASTER(dd) ? ma->cg : NULL);
 +    
 +    dd->comm->master_cg_ddp_count = state_local->ddp_count;
 +}
 +
 +/* Collect the distributed rvec vector lv into the global vector v on
 + * the master, using one point-to-point MPI_Send/MPI_Recv per node.
 + * Non-master nodes send their home atoms; the master copies its own
 + * part directly and scatters each received buffer to the global atom
 + * positions given by the master cg distribution.
 + */
 +static void dd_collect_vec_sendrecv(gmx_domdec_t *dd,
 +                                    rvec *lv,rvec *v)
 +{
 +    gmx_domdec_master_t *ma;
 +    int  n,i,c,a,nalloc=0;
 +    rvec *buf=NULL;
 +    t_block *cgs_gl;
 +
 +    ma = dd->ma;
 +    
 +    if (!DDMASTER(dd))
 +    {
 +#ifdef GMX_MPI
 +        MPI_Send(lv,dd->nat_home*sizeof(rvec),MPI_BYTE,DDMASTERRANK(dd),
 +                 dd->rank,dd->mpi_comm_all);
 +#endif
 +    } else {
 +        /* Copy the master coordinates to the global array */
 +        cgs_gl = &dd->comm->cgs_gl;
 +
 +        n = DDMASTERRANK(dd);
 +        a = 0;
 +        for(i=ma->index[n]; i<ma->index[n+1]; i++)
 +        {
 +            for(c=cgs_gl->index[ma->cg[i]]; c<cgs_gl->index[ma->cg[i]+1]; c++)
 +            {
 +                copy_rvec(lv[a++],v[c]);
 +            }
 +        }
 +        
 +        /* Receive from every other node and place in the global array */
 +        for(n=0; n<dd->nnodes; n++)
 +        {
 +            if (n != dd->rank)
 +            {
 +                /* Grow the receive buffer to this node's atom count */
 +                if (ma->nat[n] > nalloc)
 +                {
 +                    nalloc = over_alloc_dd(ma->nat[n]);
 +                    srenew(buf,nalloc);
 +                }
 +#ifdef GMX_MPI
 +                MPI_Recv(buf,ma->nat[n]*sizeof(rvec),MPI_BYTE,DDRANK(dd,n),
 +                         n,dd->mpi_comm_all,MPI_STATUS_IGNORE);
 +#endif
 +                a = 0;
 +                for(i=ma->index[n]; i<ma->index[n+1]; i++)
 +                {
 +                    for(c=cgs_gl->index[ma->cg[i]]; c<cgs_gl->index[ma->cg[i]+1]; c++)
 +                    {
 +                        copy_rvec(buf[a++],v[c]);
 +                    }
 +                }
 +            }
 +        }
 +        sfree(buf);
 +    }
 +}
 +
 +static void get_commbuffer_counts(gmx_domdec_t *dd,
 +                                  int **counts,int **disps)
 +{
 +    gmx_domdec_master_t *ma;
 +    int n;
 +
 +    ma = dd->ma;
 +    
 +    /* Make the rvec count and displacment arrays */
 +    *counts  = ma->ibuf;
 +    *disps   = ma->ibuf + dd->nnodes;
 +    for(n=0; n<dd->nnodes; n++)
 +    {
 +        (*counts)[n] = ma->nat[n]*sizeof(rvec);
 +        (*disps)[n]  = (n == 0 ? 0 : (*disps)[n-1] + (*counts)[n-1]);
 +    }
 +}
 +
 +/* Collect the distributed rvec vector lv into the global vector v on
 + * the master, using a single gatherv collective. The master gathers
 + * everything into ma->vbuf (per-node order) and then scatters it to the
 + * global atom positions given by the master cg distribution. On
 + * non-master nodes rcounts/disps/buf stay NULL, which the gatherv
 + * primitive accepts.
 + */
 +static void dd_collect_vec_gatherv(gmx_domdec_t *dd,
 +                                   rvec *lv,rvec *v)
 +{
 +    gmx_domdec_master_t *ma;
 +    int  *rcounts=NULL,*disps=NULL;
 +    int  n,i,c,a;
 +    rvec *buf=NULL;
 +    t_block *cgs_gl;
 +    
 +    ma = dd->ma;
 +    
 +    if (DDMASTER(dd))
 +    {
 +        get_commbuffer_counts(dd,&rcounts,&disps);
 +
 +        buf = ma->vbuf;
 +    }
 +    
 +    dd_gatherv(dd,dd->nat_home*sizeof(rvec),lv,rcounts,disps,buf);
 +
 +    if (DDMASTER(dd))
 +    {
 +        cgs_gl = &dd->comm->cgs_gl;
 +
 +        /* Scatter the gathered buffer to the global atom order */
 +        a = 0;
 +        for(n=0; n<dd->nnodes; n++)
 +        {
 +            for(i=ma->index[n]; i<ma->index[n+1]; i++)
 +            {
 +                for(c=cgs_gl->index[ma->cg[i]]; c<cgs_gl->index[ma->cg[i]+1]; c++)
 +                {
 +                    copy_rvec(buf[a++],v[c]);
 +                }
 +            }
 +        }
 +    }
 +}
 +
 +void dd_collect_vec(gmx_domdec_t *dd,
 +                    t_state *state_local,rvec *lv,rvec *v)
 +{
 +    gmx_domdec_master_t *ma;
 +    int  n,i,c,a,nalloc=0;
 +    rvec *buf=NULL;
 +    
 +    dd_collect_cg(dd,state_local);
 +
 +    if (dd->nnodes <= GMX_DD_NNODES_SENDRECV)
 +    {
 +        dd_collect_vec_sendrecv(dd,lv,v);
 +    }
 +    else
 +    {
 +        dd_collect_vec_gatherv(dd,lv,v);
 +    }
 +}
 +
 +
 +/* Collect the full distributed t_state state_local into the global
 + * t_state state on the master node. Replicated quantities (lambdas,
 + * box, thermostat/barostat variables) are simply copied on the master;
 + * per-atom entries flagged in state_local->flags are collected with
 + * dd_collect_vec; RNG state is gathered or copied depending on nrngi.
 + */
 +void dd_collect_state(gmx_domdec_t *dd,
 +                      t_state *state_local,t_state *state)
 +{
 +    int est,i,j,nh;
 +
 +    nh = state->nhchainlength;
 +
 +    if (DDMASTER(dd))
 +    {
 +        /* Replicated (non-distributed) state entries: plain copies */
 +        for (i=0;i<efptNR;i++) {
 +            state->lambda[i] = state_local->lambda[i];
 +        }
 +        state->fep_state = state_local->fep_state;
 +        state->veta = state_local->veta;
 +        state->vol0 = state_local->vol0;
 +        copy_mat(state_local->box,state->box);
 +        copy_mat(state_local->boxv,state->boxv);
 +        copy_mat(state_local->svir_prev,state->svir_prev);
 +        copy_mat(state_local->fvir_prev,state->fvir_prev);
 +        copy_mat(state_local->pres_prev,state->pres_prev);
 +
 +
 +        /* Nose-Hoover chains: nh entries per coupling group */
 +        for(i=0; i<state_local->ngtc; i++)
 +        {
 +            for(j=0; j<nh; j++) {
 +                state->nosehoover_xi[i*nh+j]        = state_local->nosehoover_xi[i*nh+j];
 +                state->nosehoover_vxi[i*nh+j]       = state_local->nosehoover_vxi[i*nh+j];
 +            }
 +            state->therm_integral[i] = state_local->therm_integral[i];            
 +        }
 +        for(i=0; i<state_local->nnhpres; i++) 
 +        {
 +            for(j=0; j<nh; j++) {
 +                state->nhpres_xi[i*nh+j]        = state_local->nhpres_xi[i*nh+j];
 +                state->nhpres_vxi[i*nh+j]       = state_local->nhpres_vxi[i*nh+j];
 +            }
 +        }
 +    }
 +    /* Distributed (per-atom) state entries */
 +    for(est=0; est<estNR; est++)
 +    {
 +        if (EST_DISTR(est) && (state_local->flags & (1<<est)))
 +        {
 +            switch (est) {
 +            case estX:
 +                dd_collect_vec(dd,state_local,state_local->x,state->x);
 +                break;
 +            case estV:
 +                dd_collect_vec(dd,state_local,state_local->v,state->v);
 +                break;
 +            case estSDX:
 +                dd_collect_vec(dd,state_local,state_local->sd_X,state->sd_X);
 +                break;
 +            case estCGP:
 +                dd_collect_vec(dd,state_local,state_local->cg_p,state->cg_p);
 +                break;
 +            case estLD_RNG:
 +                /* nrngi == 1: a single replicated RNG state, copy on master;
 +                 * otherwise gather the per-node RNG states.
 +                 */
 +                if (state->nrngi == 1)
 +                {
 +                    if (DDMASTER(dd))
 +                    {
 +                        for(i=0; i<state_local->nrng; i++)
 +                        {
 +                            state->ld_rng[i] = state_local->ld_rng[i];
 +                        }
 +                    }
 +                }
 +                else
 +                {
 +                    dd_gather(dd,state_local->nrng*sizeof(state->ld_rng[0]),
 +                              state_local->ld_rng,state->ld_rng);
 +                }
 +                break;
 +            case estLD_RNGI:
 +                if (state->nrngi == 1)
 +                {
 +                   if (DDMASTER(dd))
 +                    {
 +                        state->ld_rngi[0] = state_local->ld_rngi[0];
 +                    } 
 +                }
 +                else
 +                {
 +                    dd_gather(dd,sizeof(state->ld_rngi[0]),
 +                              state_local->ld_rngi,state->ld_rngi);
 +                }
 +                break;
 +            case estDISRE_INITF:
 +            case estDISRE_RM3TAV:
 +            case estORIRE_INITF:
 +            case estORIRE_DTAV:
 +                /* These entries are not collected here */
 +                break;
 +            default:
 +                gmx_incons("Unknown state entry encountered in dd_collect_state");
 +            }
 +        }
 +    }
 +}
 +
 +/* Grow all distributed per-atom arrays of state (selected by
 + * state->flags) to hold at least nalloc atoms, with over-allocation to
 + * limit reallocation frequency. When f is non-NULL the force array is
 + * grown to the same size.
 + */
 +static void dd_realloc_state(t_state *state,rvec **f,int nalloc)
 +{
 +    int est;
 +
 +    if (debug)
 +    {
 +        fprintf(debug,"Reallocating state: currently %d, required %d, allocating %d\n",state->nalloc,nalloc,over_alloc_dd(nalloc));
 +    }
 +
 +    state->nalloc = over_alloc_dd(nalloc);
 +    
 +    for(est=0; est<estNR; est++)
 +    {
 +        if (EST_DISTR(est) && (state->flags & (1<<est)))
 +        {
 +            switch(est) {
 +            case estX:
 +                srenew(state->x,state->nalloc);
 +                break;
 +            case estV:
 +                srenew(state->v,state->nalloc);
 +                break;
 +            case estSDX:
 +                srenew(state->sd_X,state->nalloc);
 +                break;
 +            case estCGP:
 +                srenew(state->cg_p,state->nalloc);
 +                break;
 +            case estLD_RNG:
 +            case estLD_RNGI:
 +            case estDISRE_INITF:
 +            case estDISRE_RM3TAV:
 +            case estORIRE_INITF:
 +            case estORIRE_DTAV:
 +                /* No reallocation required */
 +                break;
 +            default:
 +                gmx_incons("Unknown state entry encountered in dd_realloc_state");            
 +            }
 +        }
 +    }
 +    
 +    if (f != NULL)
 +    {
 +        srenew(*f,state->nalloc);
 +    }
 +}
 +
 +static void dd_check_alloc_ncg(t_forcerec *fr,t_state *state,rvec **f,
 +                               int nalloc)
 +{
 +    if (nalloc > fr->cg_nalloc)
 +    {
 +        if (debug)
 +        {
 +            fprintf(debug,"Reallocating forcerec: currently %d, required %d, allocating %d\n",fr->cg_nalloc,nalloc,over_alloc_dd(nalloc));
 +        }
 +        fr->cg_nalloc = over_alloc_dd(nalloc);
 +        srenew(fr->cginfo,fr->cg_nalloc);
 +        if (fr->cutoff_scheme == ecutsGROUP)
 +        {
 +            srenew(fr->cg_cm,fr->cg_nalloc);
 +        }
 +    }
 +    if (fr->cutoff_scheme == ecutsVERLET && nalloc > state->nalloc)
 +    {
 +        /* We don't use charge groups, we use x in state to set up
 +         * the atom communication.
 +         */
 +        dd_realloc_state(state,f,nalloc);
 +    }
 +}
 +
 +/* Distribute the global rvec vector v over the nodes into the local
 + * vector lv, using one point-to-point MPI_Send per node; the inverse of
 + * dd_collect_vec_sendrecv. The master packs each node's part in its cg
 + * order and sends it, then copies its own part directly into lv;
 + * non-master nodes just receive their home atoms.
 + */
 +static void dd_distribute_vec_sendrecv(gmx_domdec_t *dd,t_block *cgs,
 +                                       rvec *v,rvec *lv)
 +{
 +    gmx_domdec_master_t *ma;
 +    int  n,i,c,a,nalloc=0;
 +    rvec *buf=NULL;
 +    
 +    if (DDMASTER(dd))
 +    {
 +        ma  = dd->ma;
 +        
 +        for(n=0; n<dd->nnodes; n++)
 +        {
 +            if (n != dd->rank)
 +            {
 +                /* Grow the send buffer to this node's atom count */
 +                if (ma->nat[n] > nalloc)
 +                {
 +                    nalloc = over_alloc_dd(ma->nat[n]);
 +                    srenew(buf,nalloc);
 +                }
 +                /* Use lv as a temporary buffer */
 +                a = 0;
 +                for(i=ma->index[n]; i<ma->index[n+1]; i++)
 +                {
 +                    for(c=cgs->index[ma->cg[i]]; c<cgs->index[ma->cg[i]+1]; c++)
 +                    {
 +                        copy_rvec(v[c],buf[a++]);
 +                    }
 +                }
 +                /* Consistency check against the master atom count */
 +                if (a != ma->nat[n])
 +                {
 +                    gmx_fatal(FARGS,"Internal error a (%d) != nat (%d)",
 +                              a,ma->nat[n]);
 +                }
 +                
 +#ifdef GMX_MPI
 +                MPI_Send(buf,ma->nat[n]*sizeof(rvec),MPI_BYTE,
 +                         DDRANK(dd,n),n,dd->mpi_comm_all);
 +#endif
 +            }
 +        }
 +        sfree(buf);
 +        /* Copy the master's own part directly */
 +        n = DDMASTERRANK(dd);
 +        a = 0;
 +        for(i=ma->index[n]; i<ma->index[n+1]; i++)
 +        {
 +            for(c=cgs->index[ma->cg[i]]; c<cgs->index[ma->cg[i]+1]; c++)
 +            {
 +                copy_rvec(v[c],lv[a++]);
 +            }
 +        }
 +    }
 +    else
 +    {
 +#ifdef GMX_MPI
 +        MPI_Recv(lv,dd->nat_home*sizeof(rvec),MPI_BYTE,DDMASTERRANK(dd),
 +                 MPI_ANY_TAG,dd->mpi_comm_all,MPI_STATUS_IGNORE);
 +#endif
 +    }
 +}
 +
/* Distribute the global per-atom vector v from the DD master over all
 * nodes with a single scatterv; lv receives this node's part.
 * On non-master nodes scounts, disps and buf remain NULL, which is fine
 * since only the root of the scatter uses them.
 */
static void dd_distribute_vec_scatterv(gmx_domdec_t *dd,t_block *cgs,
                                       rvec *v,rvec *lv)
{
    gmx_domdec_master_t *ma;
    int  *scounts=NULL,*disps=NULL;
    int  n,i,c,a,nalloc=0;
    rvec *buf=NULL;
    
    if (DDMASTER(dd))
    {
        ma  = dd->ma;
     
        /* Per-node counts and displacements for the scatter */
        get_commbuffer_counts(dd,&scounts,&disps);

        buf = ma->vbuf;
        a = 0;
        /* Pack v in node-contiguous order following the cg distribution */
        for(n=0; n<dd->nnodes; n++)
        {
            for(i=ma->index[n]; i<ma->index[n+1]; i++)
            {
                for(c=cgs->index[ma->cg[i]]; c<cgs->index[ma->cg[i]+1]; c++)
                {
                    copy_rvec(v[c],buf[a++]);
                }
            }
        }
    }

    dd_scatterv(dd,scounts,disps,buf,dd->nat_home*sizeof(rvec),lv);
}
 +
 +static void dd_distribute_vec(gmx_domdec_t *dd,t_block *cgs,rvec *v,rvec *lv)
 +{
 +    if (dd->nnodes <= GMX_DD_NNODES_SENDRECV)
 +    {
 +        dd_distribute_vec_sendrecv(dd,cgs,v,lv);
 +    }
 +    else
 +    {
 +        dd_distribute_vec_scatterv(dd,cgs,v,lv);
 +    }
 +}
 +
/* Distribute the global t_state on the master over all DD nodes:
 * small (non-distributed) entries are copied on the master and then
 * broadcast, while the per-atom arrays are distributed according to
 * the charge-group decomposition.
 */
static void dd_distribute_state(gmx_domdec_t *dd,t_block *cgs,
                                t_state *state,t_state *state_local,
                                rvec **f)
{
    int  i,j,nh;

    nh = state->nhchainlength;

    if (DDMASTER(dd))
    {
        /* Copy the scalar/small-array state entries into state_local */
        for(i=0;i<efptNR;i++)
        {
            state_local->lambda[i] = state->lambda[i];
        }
        state_local->fep_state = state->fep_state;
        state_local->veta   = state->veta;
        state_local->vol0   = state->vol0;
        copy_mat(state->box,state_local->box);
        copy_mat(state->box_rel,state_local->box_rel);
        copy_mat(state->boxv,state_local->boxv);
        copy_mat(state->svir_prev,state_local->svir_prev);
        copy_mat(state->fvir_prev,state_local->fvir_prev);
        for(i=0; i<state_local->ngtc; i++)
        {
            /* Nose-Hoover chains are stored as flat ngtc*nh arrays */
            for(j=0; j<nh; j++) {
                state_local->nosehoover_xi[i*nh+j]        = state->nosehoover_xi[i*nh+j];
                state_local->nosehoover_vxi[i*nh+j]       = state->nosehoover_vxi[i*nh+j];
            }
            state_local->therm_integral[i] = state->therm_integral[i];
        }
        for(i=0; i<state_local->nnhpres; i++)
        {
            for(j=0; j<nh; j++) {
                state_local->nhpres_xi[i*nh+j]        = state->nhpres_xi[i*nh+j];
                state_local->nhpres_vxi[i*nh+j]       = state->nhpres_vxi[i*nh+j];
            }
        }
    }
    /* Broadcast the non-distributed entries to all nodes */
    dd_bcast(dd,((efptNR)*sizeof(real)),state_local->lambda);
    dd_bcast(dd,sizeof(int),&state_local->fep_state);
    dd_bcast(dd,sizeof(real),&state_local->veta);
    dd_bcast(dd,sizeof(real),&state_local->vol0);
    dd_bcast(dd,sizeof(state_local->box),state_local->box);
    dd_bcast(dd,sizeof(state_local->box_rel),state_local->box_rel);
    dd_bcast(dd,sizeof(state_local->boxv),state_local->boxv);
    dd_bcast(dd,sizeof(state_local->svir_prev),state_local->svir_prev);
    dd_bcast(dd,sizeof(state_local->fvir_prev),state_local->fvir_prev);
    dd_bcast(dd,((state_local->ngtc*nh)*sizeof(double)),state_local->nosehoover_xi);
    dd_bcast(dd,((state_local->ngtc*nh)*sizeof(double)),state_local->nosehoover_vxi);
    dd_bcast(dd,state_local->ngtc*sizeof(double),state_local->therm_integral);
    dd_bcast(dd,((state_local->nnhpres*nh)*sizeof(double)),state_local->nhpres_xi);
    dd_bcast(dd,((state_local->nnhpres*nh)*sizeof(double)),state_local->nhpres_vxi);

    if (dd->nat_home > state_local->nalloc)
    {
        dd_realloc_state(state_local,f,dd->nat_home);
    }
    /* Distribute the per-atom state entries that are present */
    for(i=0; i<estNR; i++)
    {
        if (EST_DISTR(i) && (state_local->flags & (1<<i)))
        {
            switch (i) {
            case estX:
                dd_distribute_vec(dd,cgs,state->x,state_local->x);
                break;
            case estV:
                dd_distribute_vec(dd,cgs,state->v,state_local->v);
                break;
            case estSDX:
                dd_distribute_vec(dd,cgs,state->sd_X,state_local->sd_X);
                break;
            case estCGP:
                dd_distribute_vec(dd,cgs,state->cg_p,state_local->cg_p);
                break;
            case estLD_RNG:
                /* One RNG state: broadcast; one per node: scatter */
                if (state->nrngi == 1)
                {
                    dd_bcastc(dd,
                              state_local->nrng*sizeof(state_local->ld_rng[0]),
                              state->ld_rng,state_local->ld_rng);
                }
                else
                {
                    dd_scatter(dd,
                               state_local->nrng*sizeof(state_local->ld_rng[0]),
                               state->ld_rng,state_local->ld_rng);
                }
                break;
            case estLD_RNGI:
                if (state->nrngi == 1)
                {
                    dd_bcastc(dd,sizeof(state_local->ld_rngi[0]),
                              state->ld_rngi,state_local->ld_rngi);
                }
                else
                {
                     dd_scatter(dd,sizeof(state_local->ld_rngi[0]),
                               state->ld_rngi,state_local->ld_rngi);
                }   
                break;
            case estDISRE_INITF:
            case estDISRE_RM3TAV:
            case estORIRE_INITF:
            case estORIRE_DTAV:
                /* Not implemented yet */
                break;
            default:
                gmx_incons("Unknown state entry encountered in dd_distribute_state");
            }
        }
    }
}
 +
 +static char dim2char(int dim)
 +{
 +    char c='?';
 +    
 +    switch (dim)
 +    {
 +    case XX: c = 'X'; break;
 +    case YY: c = 'Y'; break;
 +    case ZZ: c = 'Z'; break;
 +    default: gmx_fatal(FARGS,"Unknown dim %d",dim);
 +    }
 +    
 +    return c;
 +}
 +
/* Write a PDB file visualizing the DD grid: the 8 corners of every DD
 * cell are written as CA atoms connected by CONECT records, transformed
 * by the triclinic skew of the box; the B-factor column holds the cell
 * volume relative to a uniform decomposition.
 */
static void write_dd_grid_pdb(const char *fn,gmx_large_int_t step,
                              gmx_domdec_t *dd,matrix box,gmx_ddbox_t *ddbox)
{
    rvec grid_s[2],*grid_r=NULL,cx,r;
    char fname[STRLEN],format[STRLEN],buf[22];
    FILE *out;
    int  a,i,d,z,y,x;
    matrix tric;
    real vol;

    /* Gather all cell boundaries (x0,x1 per node) on the master */
    copy_rvec(dd->comm->cell_x0,grid_s[0]);
    copy_rvec(dd->comm->cell_x1,grid_s[1]);
    
    if (DDMASTER(dd))
    {
        snew(grid_r,2*dd->nnodes);
    }
    
    dd_gather(dd,2*sizeof(rvec),grid_s[0],DDMASTER(dd) ? grid_r[0] : NULL);
    
    if (DDMASTER(dd))
    {
        /* Build the skew matrix mapping grid coordinates to triclinic
         * Cartesian coordinates (identity for rectangular boxes).
         */
        for(d=0; d<DIM; d++)
        {
            for(i=0; i<DIM; i++)
            {
                if (d == i)
                {
                    tric[d][i] = 1;
                }
                else
                {
                    if (d < ddbox->npbcdim && dd->nc[d] > 1)
                    {
                        tric[d][i] = box[i][d]/box[i][i];
                    }
                    else
                    {
                        tric[d][i] = 0;
                    }
                }
            }
        }
        sprintf(fname,"%s_%s.pdb",fn,gmx_step_str(step,buf));
        sprintf(format,"%s%s\n",get_pdbformat(),"%6.2f%6.2f");
        out = gmx_fio_fopen(fname,"w");
        gmx_write_pdb_box(out,dd->bScrewPBC ? epbcSCREW : epbcXYZ,box);
        a = 1;
        for(i=0; i<dd->nnodes; i++)
        {
            /* Relative cell volume: cell extent product over uniform volume */
            vol = dd->nnodes/(box[XX][XX]*box[YY][YY]*box[ZZ][ZZ]);
            for(d=0; d<DIM; d++)
            {
                vol *= grid_r[i*2+1][d] - grid_r[i*2][d];
            }
            /* Emit the 8 corners of cell i as CA atoms of residue i+1 */
            for(z=0; z<2; z++)
            {
                for(y=0; y<2; y++)
                {
                    for(x=0; x<2; x++)
                    {
                        cx[XX] = grid_r[i*2+x][XX];
                        cx[YY] = grid_r[i*2+y][YY];
                        cx[ZZ] = grid_r[i*2+z][ZZ];
                        mvmul(tric,cx,r);
                        fprintf(out,format,"ATOM",a++,"CA","GLY",' ',1+i,
                                10*r[XX],10*r[YY],10*r[ZZ],1.0,vol);
                    }
                }
            }
            /* Connect the corners along each dimension (y reused as the
             * atom serial here, not the loop variable above).
             */
            for(d=0; d<DIM; d++)
            {
                for(x=0; x<4; x++)
                {
                    switch(d)
                    {
                    case 0: y = 1 + i*8 + 2*x; break;
                    case 1: y = 1 + i*8 + 2*x - (x % 2); break;
                    case 2: y = 1 + i*8 + x; break;
                    }
                    fprintf(out,"%6s%5d%5d\n","CONECT",y,y+(1<<d));
                }
            }
        }
        gmx_fio_fclose(out);
        sfree(grid_r);
    }
}
 +
/* Write the local atoms of this node as a PDB file (one file per node,
 * suffixed with the step and nodeid). The B-factor encodes the DD zone
 * of each atom; atoms beyond the zone atoms get zones.n (vsite range)
 * or zones.n+1 (the remaining communicated atoms).
 */
void write_dd_pdb(const char *fn,gmx_large_int_t step,const char *title,
                  gmx_mtop_t *mtop,t_commrec *cr,
                  int natoms,rvec x[],matrix box)
{
    char fname[STRLEN],format[STRLEN],format4[STRLEN],buf[22];
    FILE *out;
    int  i,ii,resnr,c;
    char *atomname,*resname;
    real b;
    gmx_domdec_t *dd;
    
    dd = cr->dd;
    if (natoms == -1)
    {
        /* -1 means: write everything up to and including the vsite atoms */
        natoms = dd->comm->nat[ddnatVSITE];
    }
    
    sprintf(fname,"%s_%s_n%d.pdb",fn,gmx_step_str(step,buf),cr->sim_nodeid);
    
    sprintf(format,"%s%s\n",get_pdbformat(),"%6.2f%6.2f");
    sprintf(format4,"%s%s\n",get_pdbformat4(),"%6.2f%6.2f");
    
    out = gmx_fio_fopen(fname,"w");
    
    fprintf(out,"TITLE     %s\n",title);
    gmx_write_pdb_box(out,dd->bScrewPBC ? epbcSCREW : epbcXYZ,box);
    for(i=0; i<natoms; i++)
    {
        ii = dd->gatindex[i];
        gmx_mtop_atominfo_global(mtop,ii,&atomname,&resnr,&resname);
        if (i < dd->comm->nat[ddnatZONE])
        {
            /* Find the zone this atom belongs to */
            c = 0;
            while (i >= dd->cgindex[dd->comm->zones.cg_range[c+1]])
            {
                c++;
            }
            b = c;
        }
        else if (i < dd->comm->nat[ddnatVSITE])
        {
            b = dd->comm->zones.n;
        }
        else
        {
            b = dd->comm->zones.n + 1;
        }
        /* PDB serial and residue fields wrap around their column widths */
        fprintf(out,strlen(atomname)<4 ? format : format4,
                "ATOM",(ii+1)%100000,
                atomname,resname,' ',resnr%10000,' ',
                10*x[i][XX],10*x[i][YY],10*x[i][ZZ],1.0,b);
    }
    fprintf(out,"TER\n");
    
    gmx_fio_fclose(out);
}
 +
/* Return the cut-off radius for multi-body bonded interactions between
 * DD cells, or -1 when there are no bonded interactions between
 * charge groups.
 */
real dd_cutoff_mbody(gmx_domdec_t *dd)
{
    gmx_domdec_comm_t *comm;
    int  di;
    real r;

    comm = dd->comm;

    r = -1;
    if (comm->bInterCGBondeds)
    {
        if (comm->cutoff_mbody > 0)
        {
            r = comm->cutoff_mbody;
        }
        else
        {
            /* cutoff_mbody=0 means we do not have DLB */
            r = comm->cellsize_min[dd->dim[0]];
            for(di=1; di<dd->ndim; di++)
            {
                r = min(r,comm->cellsize_min[dd->dim[di]]);
            }
            if (comm->bBondComm)
            {
                r = max(r,comm->cutoff_mbody);
            }
            else
            {
                r = min(r,comm->cutoff);
            }
        }
    }

    return r;
}
 +
 +real dd_cutoff_twobody(gmx_domdec_t *dd)
 +{
 +    real r_mb;
 +
 +    r_mb = dd_cutoff_mbody(dd);
 +
 +    return max(dd->comm->cutoff,r_mb);
 +}
 +
 +
/* Convert the Cartesian grid coordinate of a PP cell into the
 * coordinate of its PME node: along cartpmedim the PME nodes occupy the
 * last (ntot - nc) slots, and PP cells are mapped evenly onto them.
 */
static void dd_cart_coord2pmecoord(gmx_domdec_t *dd,ivec coord,ivec coord_pme)
{
    int nc,ntot;
    
    nc   = dd->nc[dd->comm->cartpmedim];
    ntot = dd->comm->ntot[dd->comm->cartpmedim];
    copy_ivec(coord,coord_pme);
    coord_pme[dd->comm->cartpmedim] =
        nc + (coord[dd->comm->cartpmedim]*(ntot - nc) + (ntot - nc)/2)/nc;
}
 +
/* Map DD cell index ddindex (0..ndd-1) onto one of npme PME node
 * indices (0..npme-1), distributing the DD cells evenly over the PME
 * nodes.
 */
static int low_ddindex2pmeindex(int ndd,int npme,int ddindex)
{
    /* Here we assign a PME node to communicate with this DD node
     * by assuming that the major index of both is x.
     * We add cr->npmenodes/2 to obtain an even distribution.
     */
    return (npme*ddindex + npme/2)/ndd;
}
 +
 +static int ddindex2pmeindex(const gmx_domdec_t *dd,int ddindex)
 +{
 +    return low_ddindex2pmeindex(dd->nnodes,dd->comm->npmenodes,ddindex);
 +}
 +
 +static int cr_ddindex2pmeindex(const t_commrec *cr,int ddindex)
 +{
 +    return low_ddindex2pmeindex(cr->dd->nnodes,cr->npmenodes,ddindex);
 +}
 +
 +static int *dd_pmenodes(t_commrec *cr)
 +{
 +    int *pmenodes;
 +    int n,i,p0,p1;
 +    
 +    snew(pmenodes,cr->npmenodes);
 +    n = 0;
 +    for(i=0; i<cr->dd->nnodes; i++) {
 +        p0 = cr_ddindex2pmeindex(cr,i);
 +        p1 = cr_ddindex2pmeindex(cr,i+1);
 +        if (i+1 == cr->dd->nnodes || p1 > p0) {
 +            if (debug)
 +                fprintf(debug,"pmenode[%d] = %d\n",n,i+1+n);
 +            pmenodes[n] = i + 1 + n;
 +            n++;
 +        }
 +    }
 +
 +    return pmenodes;
 +}
 +
/* Return the PME slab/node index serving the DD cell at grid
 * coordinates (x,y,z).
 */
static int gmx_ddcoord2pmeindex(t_commrec *cr,int x,int y,int z)
{
    gmx_domdec_t *dd;
    ivec coords,coords_pme,nc;
    int  slab;
    
    dd = cr->dd;
    /*
      if (dd->comm->bCartesian) {
      gmx_ddindex2xyz(dd->nc,ddindex,coords);
      dd_coords2pmecoords(dd,coords,coords_pme);
      copy_ivec(dd->ntot,nc);
      nc[dd->cartpmedim]         -= dd->nc[dd->cartpmedim];
      coords_pme[dd->cartpmedim] -= dd->nc[dd->cartpmedim];
      
      slab = (coords_pme[XX]*nc[YY] + coords_pme[YY])*nc[ZZ] + coords_pme[ZZ];
      } else {
      slab = (ddindex*cr->npmenodes + cr->npmenodes/2)/dd->nnodes;
      }
    */
    coords[XX] = x;
    coords[YY] = y;
    coords[ZZ] = z;
    /* Linearize the grid coordinate and map it onto the PME nodes */
    slab = ddindex2pmeindex(dd,dd_index(dd->nc,coords));
    
    return slab;
}
 +
/* Return the simulation node id of the PP node at DD grid coordinates
 * (x,y,z), taking the PP/PME rank layout (Cartesian or interleaved)
 * into account. Returns -1 when no id could be determined (only
 * possible in the Cartesian PP-PME case without MPI).
 */
static int ddcoord2simnodeid(t_commrec *cr,int x,int y,int z)
{
    gmx_domdec_comm_t *comm;
    ivec coords;
    int  ddindex,nodeid=-1;
    
    comm = cr->dd->comm;
    
    coords[XX] = x;
    coords[YY] = y;
    coords[ZZ] = z;
    if (comm->bCartesianPP_PME)
    {
#ifdef GMX_MPI
        /* With a Cartesian communicator MPI provides the mapping */
        MPI_Cart_rank(cr->mpi_comm_mysim,coords,&nodeid);
#endif
    }
    else
    {
        ddindex = dd_index(cr->dd->nc,coords);
        if (comm->bCartesianPP)
        {
            nodeid = comm->ddindex2simnodeid[ddindex];
        }
        else
        {
            if (comm->pmenodes)
            {
                /* Interleaved PME nodes shift the PP node ids */
                nodeid = ddindex + gmx_ddcoord2pmeindex(cr,x,y,z);
            }
            else
            {
                nodeid = ddindex;
            }
        }
    }
  
    return nodeid;
}
 +
/* Return the simulation node id of the PME node that serves PP node
 * sim_nodeid, or -1 when sim_nodeid is not a PP node (e.g. it is itself
 * a PME-only node).
 */
static int dd_simnode2pmenode(t_commrec *cr,int sim_nodeid)
{
    gmx_domdec_t *dd;
    gmx_domdec_comm_t *comm;
    ivec coord,coord_pme;
    int  i;
    int  pmenode=-1;
    
    dd = cr->dd;
    comm = dd->comm;
    
    /* This assumes a uniform x domain decomposition grid cell size */
    if (comm->bCartesianPP_PME)
    {
#ifdef GMX_MPI
        MPI_Cart_coords(cr->mpi_comm_mysim,sim_nodeid,DIM,coord);
        if (coord[comm->cartpmedim] < dd->nc[comm->cartpmedim])
        {
            /* This is a PP node */
            dd_cart_coord2pmecoord(dd,coord,coord_pme);
            MPI_Cart_rank(cr->mpi_comm_mysim,coord_pme,&pmenode);
        }
#endif
    }
    else if (comm->bCartesianPP)
    {
        if (sim_nodeid < dd->nnodes)
        {
            /* PP nodes come first, PME nodes are appended after them */
            pmenode = dd->nnodes + ddindex2pmeindex(dd,sim_nodeid);
        }
    }
    else
    {
        /* This assumes DD cells with identical x coordinates
         * are numbered sequentially.
         */
        if (dd->comm->pmenodes == NULL)
        {
            if (sim_nodeid < dd->nnodes)
            {
                /* The DD index equals the nodeid */
                pmenode = dd->nnodes + ddindex2pmeindex(dd,sim_nodeid);
            }
        }
        else
        {
            /* Interleaved: find the first PME node id above sim_nodeid */
            i = 0;
            while (sim_nodeid > dd->comm->pmenodes[i])
            {
                i++;
            }
            if (sim_nodeid < dd->comm->pmenodes[i])
            {
                pmenode = dd->comm->pmenodes[i];
            }
        }
    }
    
    return pmenode;
}
 +
 +gmx_bool gmx_pmeonlynode(t_commrec *cr,int sim_nodeid)
 +{
 +    gmx_bool bPMEOnlyNode;
 +    
 +    if (DOMAINDECOMP(cr))
 +    {
 +        bPMEOnlyNode = (dd_simnode2pmenode(cr,sim_nodeid) == -1);
 +    }
 +    else
 +    {
 +        bPMEOnlyNode = FALSE;
 +    }
 +    
 +    return bPMEOnlyNode;
 +}
 +
/* Determine for PME node pmenodeid the list of PP nodes it receives
 * coordinates from. my_ddnodes is allocated here and returned along
 * with its length nmy_ddnodes; node_peer is set to the last PP node in
 * the list (the peer node).
 */
void get_pme_ddnodes(t_commrec *cr,int pmenodeid,
                     int *nmy_ddnodes,int **my_ddnodes,int *node_peer)
{
    gmx_domdec_t *dd;
    int x,y,z;
    ivec coord,coord_pme;
    
    dd = cr->dd;
    
    /* Upper bound: PP nodes are distributed evenly over PME nodes */
    snew(*my_ddnodes,(dd->nnodes+cr->npmenodes-1)/cr->npmenodes);
    
    *nmy_ddnodes = 0;
    for(x=0; x<dd->nc[XX]; x++)
    {
        for(y=0; y<dd->nc[YY]; y++)
        {
            for(z=0; z<dd->nc[ZZ]; z++)
            {
                if (dd->comm->bCartesianPP_PME)
                {
                    coord[XX] = x;
                    coord[YY] = y;
                    coord[ZZ] = z;
                    dd_cart_coord2pmecoord(dd,coord,coord_pme);
                    if (dd->ci[XX] == coord_pme[XX] &&
                        dd->ci[YY] == coord_pme[YY] &&
                        dd->ci[ZZ] == coord_pme[ZZ])
                        (*my_ddnodes)[(*nmy_ddnodes)++] = ddcoord2simnodeid(cr,x,y,z);
                }
                else
                {
                    /* The slab corresponds to the nodeid in the PME group */
                    if (gmx_ddcoord2pmeindex(cr,x,y,z) == pmenodeid)
                    {
                        (*my_ddnodes)[(*nmy_ddnodes)++] = ddcoord2simnodeid(cr,x,y,z);
                    }
                }
            }
        }
    }
    
    /* The last PP-only node is the peer node */
    *node_peer = (*my_ddnodes)[*nmy_ddnodes-1];
    
    if (debug)
    {
        fprintf(debug,"Receive coordinates from PP nodes:");
        for(x=0; x<*nmy_ddnodes; x++)
        {
            fprintf(debug," %d",(*my_ddnodes)[x]);
        }
        fprintf(debug,"\n");
    }
}
 +
/* Determine whether this PP node should receive the virial and energy
 * from its PME node: when several PP nodes share one PME node, only the
 * last of them receives.
 */
static gmx_bool receive_vir_ener(t_commrec *cr)
{
    gmx_domdec_comm_t *comm;
    int  pmenode,coords[DIM],rank;
    gmx_bool bReceive;
    
    bReceive = TRUE;
    if (cr->npmenodes < cr->dd->nnodes)
    {
        comm = cr->dd->comm;
        if (comm->bCartesianPP_PME)
        {
            pmenode = dd_simnode2pmenode(cr,cr->sim_nodeid);
#ifdef GMX_MPI
            /* Check whether the next PP node along cartpmedim shares
             * our PME node; if so, we are not the last one.
             */
            MPI_Cart_coords(cr->mpi_comm_mysim,cr->sim_nodeid,DIM,coords);
            coords[comm->cartpmedim]++;
            if (coords[comm->cartpmedim] < cr->dd->nc[comm->cartpmedim])
            {
                MPI_Cart_rank(cr->mpi_comm_mysim,coords,&rank);
                if (dd_simnode2pmenode(cr,rank) == pmenode)
                {
                    /* This is not the last PP node for pmenode */
                    bReceive = FALSE;
                }
            }
#endif
        }
        else
        {
            pmenode = dd_simnode2pmenode(cr,cr->sim_nodeid);
            if (cr->sim_nodeid+1 < cr->nnodes &&
                dd_simnode2pmenode(cr,cr->sim_nodeid+1) == pmenode)
            {
                /* This is not the last PP node for pmenode */
                bReceive = FALSE;
            }
        }
    }
    
    return bReceive;
}
 +
 +static void set_zones_ncg_home(gmx_domdec_t *dd)
 +{
 +    gmx_domdec_zones_t *zones;
 +    int i;
 +
 +    zones = &dd->comm->zones;
 +
 +    zones->cg_range[0] = 0;
 +    for(i=1; i<zones->n+1; i++)
 +    {
 +        zones->cg_range[i] = dd->ncg_home;
 +    }
 +}
 +
/* Rebuild the local charge-group bookkeeping (index_gl, cgindex,
 * ncg_home, nat_home and the zone ranges) from the charge-group list
 * stored in state (state->cg_gl), using the global cg index gcgs_index.
 */
static void rebuild_cgindex(gmx_domdec_t *dd,
                            const int *gcgs_index,t_state *state)
{
    int nat,i,*ind,*dd_cg_gl,*cgindex,cg_gl;
    
    ind = state->cg_gl;
    dd_cg_gl = dd->index_gl;
    cgindex  = dd->cgindex;
    nat = 0;
    cgindex[0] = nat;
    for(i=0; i<state->ncg_gl; i++)
    {
        /* cgindex[i] is the first local atom of charge group i */
        cgindex[i] = nat;
        cg_gl = ind[i];
        dd_cg_gl[i] = cg_gl;
        nat += gcgs_index[cg_gl+1] - gcgs_index[cg_gl];
    }
    /* Closing entry: total number of home atoms */
    cgindex[i] = nat;
    
    dd->ncg_home = state->ncg_gl;
    dd->nat_home = nat;

    set_zones_ncg_home(dd);
}
 +
 +static int ddcginfo(const cginfo_mb_t *cginfo_mb,int cg)
 +{
 +    while (cg >= cginfo_mb->cg_end)
 +    {
 +        cginfo_mb++;
 +    }
 +
 +    return cginfo_mb->cginfo[(cg - cginfo_mb->cg_start) % cginfo_mb->cg_mod];
 +}
 +
/* Set the cginfo entries for local charge groups cg0..cg1-1 from the
 * per-molblock info (when fr is given) and/or mark them as local in
 * bLocalCG (when given). Either pointer may be NULL.
 */
static void dd_set_cginfo(int *index_gl,int cg0,int cg1,
                          t_forcerec *fr,char *bLocalCG)
{
    cginfo_mb_t *cginfo_mb;
    int *cginfo;
    int cg;

    if (fr != NULL)
    {
        cginfo_mb = fr->cginfo_mb;
        cginfo    = fr->cginfo;

        for(cg=cg0; cg<cg1; cg++)
        {
            cginfo[cg] = ddcginfo(cginfo_mb,index_gl[cg]);
        }
    }

    if (bLocalCG != NULL)
    {
        for(cg=cg0; cg<cg1; cg++)
        {
            bLocalCG[index_gl[cg]] = TRUE;
        }
    }
}
 +
 +static void make_dd_indices(gmx_domdec_t *dd,
 +                            const int *gcgs_index,int cg_start)
 +{
 +    int nzone,zone,zone1,cg0,cg1,cg1_p1,cg,cg_gl,a,a_gl;
 +    int *zone2cg,*zone_ncg1,*index_gl,*gatindex;
 +    gmx_ga2la_t *ga2la;
 +    char *bLocalCG;
 +    gmx_bool bCGs;
 +
 +    bLocalCG = dd->comm->bLocalCG;
 +
 +    if (dd->nat_tot > dd->gatindex_nalloc)
 +    {
 +        dd->gatindex_nalloc = over_alloc_dd(dd->nat_tot);
 +        srenew(dd->gatindex,dd->gatindex_nalloc);
 +    }
 +
 +    nzone      = dd->comm->zones.n;
 +    zone2cg    = dd->comm->zones.cg_range;
 +    zone_ncg1  = dd->comm->zone_ncg1;
 +    index_gl   = dd->index_gl;
 +    gatindex   = dd->gatindex;
 +    bCGs       = dd->comm->bCGs;
 +
 +    if (zone2cg[1] != dd->ncg_home)
 +    {
 +        gmx_incons("dd->ncg_zone is not up to date");
 +    }
 +    
 +    /* Make the local to global and global to local atom index */
 +    a = dd->cgindex[cg_start];
 +    for(zone=0; zone<nzone; zone++)
 +    {
 +        if (zone == 0)
 +        {
 +            cg0 = cg_start;
 +        }
 +        else
 +        {
 +            cg0 = zone2cg[zone];
 +        }
 +        cg1    = zone2cg[zone+1];
 +        cg1_p1 = cg0 + zone_ncg1[zone];
 +
 +        for(cg=cg0; cg<cg1; cg++)
 +        {
 +            zone1 = zone;
 +            if (cg >= cg1_p1)
 +            {
 +                /* Signal that this cg is from more than one pulse away */
 +                zone1 += nzone;
 +            }
 +            cg_gl = index_gl[cg];
 +            if (bCGs)
 +            {
 +                for(a_gl=gcgs_index[cg_gl]; a_gl<gcgs_index[cg_gl+1]; a_gl++)
 +                {
 +                    gatindex[a] = a_gl;
 +                    ga2la_set(dd->ga2la,a_gl,a,zone1);
 +                    a++;
 +                }
 +            }
 +            else
 +            {
 +                gatindex[a] = cg_gl;
 +                ga2la_set(dd->ga2la,cg_gl,a,zone1);
 +                a++;
 +            }
 +        }
 +    }
 +}
 +
/* Debug check of the bLocalCG flag array against the local global-index
 * list: every local cg must be marked, and the number of marked cgs
 * must equal ncg_tot. Returns the number of inconsistencies found
 * (0 when bLocalCG is NULL).
 */
static int check_bLocalCG(gmx_domdec_t *dd,int ncg_sys,const char *bLocalCG,
                          const char *where)
{
    int ncg,i,ngl,nerr;

    nerr = 0;
    if (bLocalCG == NULL)
    {
        return nerr;
    }
    for(i=0; i<dd->ncg_tot; i++)
    {
        if (!bLocalCG[dd->index_gl[i]])
        {
            fprintf(stderr,
                    "DD node %d, %s: cg %d, global cg %d is not marked in bLocalCG (ncg_home %d)\n",dd->rank,where,i+1,dd->index_gl[i]+1,dd->ncg_home);
            nerr++;
        }
    }
    /* Count all cgs marked as local over the whole system */
    ngl = 0;
    for(i=0; i<ncg_sys; i++)
    {
        if (bLocalCG[i])
        {
            ngl++;
        }
    }
    if (ngl != dd->ncg_tot)
    {
        fprintf(stderr,"DD node %d, %s: In bLocalCG %d cgs are marked as local, whereas there are %d\n",dd->rank,where,ngl,dd->ncg_tot);
        nerr++;
    }

    return nerr;
}
 +
 +static void check_index_consistency(gmx_domdec_t *dd,
 +                                    int natoms_sys,int ncg_sys,
 +                                    const char *where)
 +{
 +    int  nerr,ngl,i,a,cell;
 +    int  *have;
 +
 +    nerr = 0;
 +
 +    if (dd->comm->DD_debug > 1)
 +    {
 +        snew(have,natoms_sys);
 +        for(a=0; a<dd->nat_tot; a++)
 +        {
 +            if (have[dd->gatindex[a]] > 0)
 +            {
 +                fprintf(stderr,"DD node %d: global atom %d occurs twice: index %d and %d\n",dd->rank,dd->gatindex[a]+1,have[dd->gatindex[a]],a+1);
 +            }
 +            else
 +            {
 +                have[dd->gatindex[a]] = a + 1;
 +            }
 +        }
 +        sfree(have);
 +    }
 +
 +    snew(have,dd->nat_tot);
 +
 +    ngl  = 0;
 +    for(i=0; i<natoms_sys; i++)
 +    {
 +        if (ga2la_get(dd->ga2la,i,&a,&cell))
 +        {
 +            if (a >= dd->nat_tot)
 +            {
 +                fprintf(stderr,"DD node %d: global atom %d marked as local atom %d, which is larger than nat_tot (%d)\n",dd->rank,i+1,a+1,dd->nat_tot);
 +                nerr++;
 +            }
 +            else
 +            {
 +                have[a] = 1;
 +                if (dd->gatindex[a] != i)
 +                {
 +                    fprintf(stderr,"DD node %d: global atom %d marked as local atom %d, which has global atom index %d\n",dd->rank,i+1,a+1,dd->gatindex[a]+1);
 +                    nerr++;
 +                }
 +            }
 +            ngl++;
 +        }
 +    }
 +    if (ngl != dd->nat_tot)
 +    {
 +        fprintf(stderr,
 +                "DD node %d, %s: %d global atom indices, %d local atoms\n",
 +                dd->rank,where,ngl,dd->nat_tot);
 +    }
 +    for(a=0; a<dd->nat_tot; a++)
 +    {
 +        if (have[a] == 0)
 +        {
 +            fprintf(stderr,
 +                    "DD node %d, %s: local atom %d, global %d has no global index\n",
 +                    dd->rank,where,a+1,dd->gatindex[a]+1);
 +        }
 +    }
 +    sfree(have);
 +
 +    nerr += check_bLocalCG(dd,ncg_sys,dd->comm->bLocalCG,where);
 +
 +    if (nerr > 0) {
 +        gmx_fatal(FARGS,"DD node %d, %s: %d atom/cg index inconsistencies",
 +                  dd->rank,where,nerr);
 +    }
 +}
 +
/* Clear the local index data from entry a_start / cg_start onwards:
 * the global->local atom lookup, the bLocalCG flags and the local
 * vsite (and, when present, constraint) indices.
 */
static void clear_dd_indices(gmx_domdec_t *dd,int cg_start,int a_start)
{
    int  i;
    char *bLocalCG;

    if (a_start == 0)
    {
        /* Clear the whole list without searching */
        ga2la_clear(dd->ga2la);
    }
    else
    {
        for(i=a_start; i<dd->nat_tot; i++)
        {
            ga2la_del(dd->ga2la,dd->gatindex[i]);
        }
    }

    bLocalCG = dd->comm->bLocalCG;
    if (bLocalCG)
    {
        for(i=cg_start; i<dd->ncg_tot; i++)
        {
            bLocalCG[dd->index_gl[i]] = FALSE;
        }
    }

    dd_clear_local_vsite_indices(dd);
    
    if (dd->constraints)
    {
        dd_clear_local_constraint_indices(dd);
    }
}
 +
 +static real grid_jump_limit(gmx_domdec_comm_t *comm,real cutoff,
 +                            int dim_ind)
 +{
 +    real grid_jump_limit;
 +
 +    /* The distance between the boundaries of cells at distance
 +     * x+-1,y+-1 or y+-1,z+-1 is limited by the cut-off restrictions
 +     * and by the fact that cells should not be shifted by more than
 +     * half their size, such that cg's only shift by one cell
 +     * at redecomposition.
 +     */
 +    grid_jump_limit = comm->cellsize_limit;
 +    if (!comm->bVacDLBNoLimit)
 +    {
 +        grid_jump_limit = max(grid_jump_limit,
 +                              cutoff/comm->cd[dim_ind].np);
 +    }
 +
 +    return grid_jump_limit;
 +}
 +
/* Check whether neighboring DD cell boundaries along the minor DD
 * dimensions have shifted more than the allowed jump limit. Returns
 * TRUE when the grid is invalid; with bFatal set, an invalid grid is a
 * fatal error instead.
 */
static gmx_bool check_grid_jump(gmx_large_int_t step,
                                gmx_domdec_t *dd,
                                real cutoff,
                                gmx_ddbox_t *ddbox,
                                gmx_bool bFatal)
{
    gmx_domdec_comm_t *comm;
    int  d,dim;
    real limit,bfac;
    gmx_bool bInvalid;

    bInvalid = FALSE;

    comm = dd->comm;
    
    for(d=1; d<dd->ndim; d++)
    {
        dim = dd->dim[d];
        limit = grid_jump_limit(comm,cutoff,d);
        /* Convert relative cell fractions to distances via the box size
         * (corrected by the skew factor for triclinic dimensions).
         */
        bfac = ddbox->box_size[dim];
        if (ddbox->tric_dir[dim])
        {
            bfac *= ddbox->skew_fac[dim];
        }
        if ((comm->cell_f1[d] - comm->cell_f_max0[d])*bfac <  limit ||
            (comm->cell_f0[d] - comm->cell_f_min1[d])*bfac > -limit)
        {
            bInvalid = TRUE;

            if (bFatal)
            {
                char buf[22];

                /* This error should never be triggered under normal
                 * circumstances, but you never know ...
                 */
                gmx_fatal(FARGS,"Step %s: The domain decomposition grid has shifted too much in the %c-direction around cell %d %d %d. This should not have happened. Running with less nodes might avoid this issue.",
                          gmx_step_str(step,buf),
                          dim2char(dim),dd->ci[XX],dd->ci[YY],dd->ci[ZZ]);
            }
        }
    }

    return bInvalid;
}
 +
 +static int dd_load_count(gmx_domdec_comm_t *comm)
 +{
 +    return (comm->eFlop ? comm->flop_n : comm->cycl_n[ddCyclF]);
 +}
 +
/* Return the force-calculation load measure for this node: the flop
 * count when flop counting is enabled (with an optional pseudo-random
 * perturbation for eFlop > 1), otherwise the accumulated force cycle
 * count with the single largest sample subtracted.
 */
static float dd_force_load(gmx_domdec_comm_t *comm)
{
    float load;
    
    if (comm->eFlop)
    {
        load = comm->flop;
        if (comm->eFlop > 1)
        {
            /* Perturb the flops by up to +-5% times (eFlop-1) */
            load *= 1.0 + (comm->eFlop - 1)*(0.1*rand()/RAND_MAX - 0.05);
        }
    } 
    else
    {
        load = comm->cycl[ddCyclF];
        if (comm->cycl_n[ddCyclF] > 1)
        {
            /* Subtract the maximum of the last n cycle counts
             * to get rid of possible high counts due to other soures,
             * for instance system activity, that would otherwise
             * affect the dynamic load balancing.
             */
            load -= comm->cycl_max[ddCyclF];
        }
    }
    
    return load;
}
 +
/* Build the array of cell boundary fractions along dimension dim
 * (allocated here, size nc[dim]+1, running from 0 to 1), taken from the
 * user-supplied static load balancing fractions when present, uniform
 * otherwise.
 */
static void set_slb_pme_dim_f(gmx_domdec_t *dd,int dim,real **dim_f)
{
    gmx_domdec_comm_t *comm;
    int i;
    
    comm = dd->comm;
    
    snew(*dim_f,dd->nc[dim]+1);
    (*dim_f)[0] = 0;
    for(i=1; i<dd->nc[dim]; i++)
    {
        if (comm->slb_frac[dim])
        {
            (*dim_f)[i] = (*dim_f)[i-1] + comm->slb_frac[dim][i-1];
        }
        else
        {
            (*dim_f)[i] = (real)i/(real)dd->nc[dim];
        }
    }
    (*dim_f)[dd->nc[dim]] = 1;
}
 +
 +/* Initializes the PME slab decomposition data for decomposition index
 + * dimind: chooses the Cartesian dimension for the PME slabs, determines
 + * the number of slabs, and for each slab the min/max PP cell coordinate
 + * range along that dimension. Returns early when there is at most one
 + * slab (no slab bookkeeping needed).
 + */
 +static void init_ddpme(gmx_domdec_t *dd,gmx_ddpme_t *ddpme,int dimind)
 +{
 +    int        pmeindex,slab,nso,i;
 +    ivec xyz;
 +    
 +    /* Special case: first DD dimension is y and there is only one
 +     * PME node in x, then the PME slabs run along y.
 +     */
 +    if (dimind == 0 && dd->dim[0] == YY && dd->comm->npmenodes_x == 1)
 +    {
 +        ddpme->dim = YY;
 +    }
 +    else
 +    {
 +        ddpme->dim = dimind;
 +    }
 +    ddpme->dim_match = (ddpme->dim == dd->dim[dimind]);
 +    
 +    ddpme->nslab = (ddpme->dim == 0 ?
 +                    dd->comm->npmenodes_x :
 +                    dd->comm->npmenodes_y);
 +
 +    if (ddpme->nslab <= 1)
 +    {
 +        return;
 +    }
 +
 +    /* Number of PME nodes per slab in this decomposition dimension */
 +    nso = dd->comm->npmenodes/ddpme->nslab;
 +    /* Determine for each PME slab the PP location range for dimension dim */
 +    snew(ddpme->pp_min,ddpme->nslab);
 +    snew(ddpme->pp_max,ddpme->nslab);
 +    for(slab=0; slab<ddpme->nslab; slab++) {
 +        ddpme->pp_min[slab] = dd->nc[dd->dim[dimind]] - 1;
 +        ddpme->pp_max[slab] = 0;
 +    }
 +    for(i=0; i<dd->nnodes; i++) {
 +        ddindex2xyz(dd->nc,i,xyz);
 +        /* For y only use our y/z slab.
 +         * This assumes that the PME x grid size matches the DD grid size.
 +         */
 +        if (dimind == 0 || xyz[XX] == dd->ci[XX]) {
 +            pmeindex = ddindex2pmeindex(dd,i);
 +            if (dimind == 0) {
 +                slab = pmeindex/nso;
 +            } else {
 +                slab = pmeindex % ddpme->nslab;
 +            }
 +            ddpme->pp_min[slab] = min(ddpme->pp_min[slab],xyz[dimind]);
 +            ddpme->pp_max[slab] = max(ddpme->pp_max[slab],xyz[dimind]);
 +        }
 +    }
 +
 +    set_slb_pme_dim_f(dd,ddpme->dim,&ddpme->slb_dim_f);
 +}
 +
 +/* Returns the maximum PME communication shift along x,
 + * or 0 when the first PME decomposition dimension is not x.
 + */
 +int dd_pme_maxshift_x(gmx_domdec_t *dd)
 +{
 +    if (dd->comm->ddpme[0].dim == XX)
 +    {
 +        return dd->comm->ddpme[0].maxshift;
 +    }
 +    else
 +    {
 +        return 0;
 +    }
 +}
 +
 +/* Returns the maximum PME communication shift along y: checks the first
 + * PME decomposition dimension, then the second (when 2D PME decomposition
 + * is used), and returns 0 when neither runs along y.
 + */
 +int dd_pme_maxshift_y(gmx_domdec_t *dd)
 +{
 +    if (dd->comm->ddpme[0].dim == YY)
 +    {
 +        return dd->comm->ddpme[0].maxshift;
 +    }
 +    else if (dd->comm->npmedecompdim >= 2 && dd->comm->ddpme[1].dim == YY)
 +    {
 +        return dd->comm->ddpme[1].maxshift;
 +    }
 +    else
 +    {
 +        return 0;
 +    }
 +}
 +
 +/* Determines ddpme->maxshift: the maximum number of neighboring PME
 + * slabs a PP cell might need to communicate with, given the current
 + * relative cell boundaries cell_f. Larger staggering of the DD grid
 + * relative to the uniform PME slabs requires a larger shift.
 + */
 +static void set_pme_maxshift(gmx_domdec_t *dd,gmx_ddpme_t *ddpme,
 +                             gmx_bool bUniform,gmx_ddbox_t *ddbox,real *cell_f)
 +{
 +    gmx_domdec_comm_t *comm;
 +    int  nc,ns,s;
 +    int  *xmin,*xmax;
 +    real range,pme_boundary;
 +    int  sh;
 +    
 +    comm = dd->comm;
 +    nc  = dd->nc[ddpme->dim];
 +    ns  = ddpme->nslab;
 +    
 +    if (!ddpme->dim_match)
 +    {
 +        /* PP decomposition is not along dim: the worst situation */
 +        sh = ns/2;
 +    }
 +    else if (ns <= 3 || (bUniform && ns == nc))
 +    {
 +        /* The optimal situation */
 +        sh = 1;
 +    }
 +    else
 +    {
 +        /* We need to check for all pme nodes which nodes they
 +         * could possibly need to communicate with.
 +         */
 +        xmin = ddpme->pp_min;
 +        xmax = ddpme->pp_max;
 +        /* Allow for atoms to be maximally 2/3 times the cut-off
 +         * out of their DD cell. This is a reasonable balance between
 +         * between performance and support for most charge-group/cut-off
 +         * combinations.
 +         */
 +        range  = 2.0/3.0*comm->cutoff/ddbox->box_size[ddpme->dim];
 +        /* Avoid extra communication when we are exactly at a boundary */
 +        range *= 0.999;
 +        
 +        sh = 1;
 +        for(s=0; s<ns; s++)
 +        {
 +            /* PME slab s spreads atoms between box frac. s/ns and (s+1)/ns */
 +            pme_boundary = (real)s/ns;
 +            /* Grow sh while a PP cell sh+1 slabs below (with periodic
 +             * wrapping) can still reach past this slab's lower boundary.
 +             */
 +            while (sh+1 < ns &&
 +                   ((s-(sh+1) >= 0 &&
 +                     cell_f[xmax[s-(sh+1)   ]+1]     + range > pme_boundary) ||
 +                    (s-(sh+1) <  0 &&
 +                     cell_f[xmax[s-(sh+1)+ns]+1] - 1 + range > pme_boundary)))
 +            {
 +                sh++;
 +            }
 +            pme_boundary = (real)(s+1)/ns;
 +            /* Same check in the upward direction for the upper boundary */
 +            while (sh+1 < ns &&
 +                   ((s+(sh+1) <  ns &&
 +                     cell_f[xmin[s+(sh+1)   ]  ]     - range < pme_boundary) ||
 +                    (s+(sh+1) >= ns &&
 +                     cell_f[xmin[s+(sh+1)-ns]  ] + 1 - range < pme_boundary)))
 +            {
 +                sh++;
 +            }
 +        }
 +    }
 +    
 +    ddpme->maxshift = sh;
 +    
 +    if (debug)
 +    {
 +        fprintf(debug,"PME slab communication range for dim %d is %d\n",
 +                ddpme->dim,ddpme->maxshift);
 +    }
 +}
 +
 +/* Fatal error when, along any decomposed dimension, the (skew-corrected)
 + * box size is too small to hold dd->nc[dim] cells of at least the
 + * minimum allowed cell size.
 + */
 +static void check_box_size(gmx_domdec_t *dd,gmx_ddbox_t *ddbox)
 +{
 +    int d,dim;
 +    
 +    for(d=0; d<dd->ndim; d++)
 +    {
 +        dim = dd->dim[d];
 +        if (dim < ddbox->nboundeddim &&
 +            ddbox->box_size[dim]*ddbox->skew_fac[dim] <
 +            dd->nc[dim]*dd->comm->cellsize_limit*DD_CELL_MARGIN)
 +        {
 +            gmx_fatal(FARGS,"The %c-size of the box (%f) times the triclinic skew factor (%f) is smaller than the number of DD cells (%d) times the smallest allowed cell size (%f)\n",
 +                      dim2char(dim),ddbox->box_size[dim],ddbox->skew_fac[dim],
 +                      dd->nc[dim],dd->comm->cellsize_limit);
 +        }
 +    }
 +}
 +
 +/* Sets the DD cell boundaries for the static load balancing case
 + * (uniform grid or user-supplied fractions in comm->slb_frac).
 + * With bMaster set, fills the master's full boundary arrays
 + * (dd->ma->cell_x); otherwise sets only this rank's cell_x0/cell_x1.
 + * Also determines npulse[d], the number of communication pulses needed
 + * per dimension so that the pulses together cover the cut-off.
 + */
 +static void set_dd_cell_sizes_slb(gmx_domdec_t *dd,gmx_ddbox_t *ddbox,
 +                                  gmx_bool bMaster,ivec npulse)
 +{
 +    gmx_domdec_comm_t *comm;
 +    int  d,j;
 +    rvec cellsize_min;
 +    real *cell_x,cell_dx,cellsize;
 +    
 +    comm = dd->comm;
 +    
 +    for(d=0; d<DIM; d++)
 +    {
 +        cellsize_min[d] = ddbox->box_size[d]*ddbox->skew_fac[d];
 +        npulse[d] = 1;
 +        if (dd->nc[d] == 1 || comm->slb_frac[d] == NULL)
 +        {
 +            /* Uniform grid */
 +            cell_dx = ddbox->box_size[d]/dd->nc[d];
 +            if (bMaster)
 +            {
 +                for(j=0; j<dd->nc[d]+1; j++)
 +                {
 +                    dd->ma->cell_x[d][j] = ddbox->box0[d] + j*cell_dx;
 +                }
 +            }
 +            else
 +            {
 +                comm->cell_x0[d] = ddbox->box0[d] + (dd->ci[d]  )*cell_dx;
 +                comm->cell_x1[d] = ddbox->box0[d] + (dd->ci[d]+1)*cell_dx;
 +            }
 +            cellsize = cell_dx*ddbox->skew_fac[d];
 +            /* Increase the pulse count until the pulses span the cut-off */
 +            while (cellsize*npulse[d] < comm->cutoff && npulse[d] < dd->nc[d]-1)
 +            {
 +                npulse[d]++;
 +            }
 +            cellsize_min[d] = cellsize;
 +        }
 +        else
 +        {
 +            /* Statically load balanced grid */
 +            /* Also when we are not doing a master distribution we determine
 +             * all cell borders in a loop to obtain identical values
 +             * to the master distribution case and to determine npulse.
 +             */
 +            if (bMaster)
 +            {
 +                cell_x = dd->ma->cell_x[d];
 +            }
 +            else
 +            {
 +                snew(cell_x,dd->nc[d]+1);
 +            }
 +            cell_x[0] = ddbox->box0[d];
 +            for(j=0; j<dd->nc[d]; j++)
 +            {
 +                cell_dx = ddbox->box_size[d]*comm->slb_frac[d][j];
 +                cell_x[j+1] = cell_x[j] + cell_dx;
 +                cellsize = cell_dx*ddbox->skew_fac[d];
 +                while (cellsize*npulse[d] < comm->cutoff &&
 +                       npulse[d] < dd->nc[d]-1)
 +                {
 +                    npulse[d]++;
 +                }
 +                cellsize_min[d] = min(cellsize_min[d],cellsize);
 +            }
 +            if (!bMaster)
 +            {
 +                comm->cell_x0[d] = cell_x[dd->ci[d]];
 +                comm->cell_x1[d] = cell_x[dd->ci[d]+1];
 +                sfree(cell_x);
 +            }
 +        }
 +        /* The following limitation is to avoid that a cell would receive
 +         * some of its own home charge groups back over the periodic boundary.
 +         * Double charge groups cause trouble with the global indices.
 +         */
 +        if (d < ddbox->npbcdim &&
 +            dd->nc[d] > 1 && npulse[d] >= dd->nc[d])
 +        {
 +            gmx_fatal_collective(FARGS,NULL,dd,
 +                                 "The box size in direction %c (%f) times the triclinic skew factor (%f) is too small for a cut-off of %f with %d domain decomposition cells, use 1 or more than %d %s or increase the box size in this direction",
 +                                 dim2char(d),ddbox->box_size[d],ddbox->skew_fac[d],
 +                                 comm->cutoff,
 +                                 dd->nc[d],dd->nc[d],
 +                                 dd->nnodes > dd->nc[d] ? "cells" : "processors");
 +        }
 +    }
 +    
 +    if (!comm->bDynLoadBal)
 +    {
 +        copy_rvec(cellsize_min,comm->cellsize_min);
 +    }
 +   
 +    for(d=0; d<comm->npmedecompdim; d++)
 +    {
 +        set_pme_maxshift(dd,&comm->ddpme[d],
 +                         comm->slb_frac[dd->dim[d]]==NULL,ddbox,
 +                         comm->ddpme[d].slb_dim_f);
 +    }
 +}
 +
 +
 +/* Enforces the minimum cell size and (for d > 0) the staggering bounds
 + * on the relative cell boundaries root->cell_f within [range[0],range[1]).
 + * Works recursively: when a staggering bound is violated, the boundary
 + * is pinned and the function recurses on the sub-ranges on either side.
 + * Run on the row-root rank only.
 + */
 +static void dd_cell_sizes_dlb_root_enforce_limits(gmx_domdec_t *dd,
 +                                       int d,int dim,gmx_domdec_root_t *root,
 +                                       gmx_ddbox_t *ddbox,
 +                                       gmx_bool bUniform,gmx_large_int_t step, real cellsize_limit_f, int range[])
 +{
 +    gmx_domdec_comm_t *comm;
 +    int  ncd,i,j,nmin,nmin_old;
 +    gmx_bool bLimLo,bLimHi;
 +    real *cell_size;
 +    real fac,halfway,cellsize_limit_f_i,region_size;
 +    gmx_bool bPBC,bLastHi=FALSE;
 +    int nrange[]={range[0],range[1]};
 +
 +    region_size= root->cell_f[range[1]]-root->cell_f[range[0]];  
 +
 +    comm = dd->comm;
 +
 +    ncd = dd->nc[dim];
 +
 +    bPBC = (dim < ddbox->npbcdim);
 +
 +    cell_size = root->buf_ncd;
 +
 +    if (debug) 
 +    {
 +        fprintf(debug,"enforce_limits: %d %d\n",range[0],range[1]);
 +    }
 +
 +    /* First we need to check if the scaling does not make cells
 +     * smaller than the smallest allowed size.
 +     * We need to do this iteratively, since if a cell is too small,
 +     * it needs to be enlarged, which makes all the other cells smaller,
 +     * which could in turn make another cell smaller than allowed.
 +     */
 +    for(i=range[0]; i<range[1]; i++)
 +    {
 +        root->bCellMin[i] = FALSE;
 +    }
 +    nmin = 0;
 +    do
 +    {
 +        nmin_old = nmin;
 +        /* We need the total for normalization */
 +        fac = 0;
 +        for(i=range[0]; i<range[1]; i++)
 +        {
 +            if (root->bCellMin[i] == FALSE)
 +            {
 +                fac += cell_size[i];
 +            }
 +        }
 +        fac = ( region_size - nmin*cellsize_limit_f)/fac; /* substracting cells already set to cellsize_limit_f */
 +        /* Determine the cell boundaries */
 +        for(i=range[0]; i<range[1]; i++)
 +        {
 +            if (root->bCellMin[i] == FALSE)
 +            {
 +                cell_size[i] *= fac;
 +                /* Without PBC the outermost cells have no size limit */
 +                if (!bPBC && (i == 0 || i == dd->nc[dim] -1))
 +                {
 +                    cellsize_limit_f_i = 0;
 +                }
 +                else
 +                {
 +                    cellsize_limit_f_i = cellsize_limit_f;
 +                }
 +                if (cell_size[i] < cellsize_limit_f_i)
 +                {
 +                    root->bCellMin[i] = TRUE;
 +                    cell_size[i] = cellsize_limit_f_i;
 +                    nmin++;
 +                }
 +            }
 +            root->cell_f[i+1] = root->cell_f[i] + cell_size[i];
 +        }
 +    }
 +    while (nmin > nmin_old);
 +    
 +    i=range[1]-1;
 +    cell_size[i] = root->cell_f[i+1] - root->cell_f[i];
 +    /* For this check we should not use DD_CELL_MARGIN,
 +     * but a slightly smaller factor,
 +     * since rounding could get use below the limit.
 +     */
 +    if (bPBC && cell_size[i] < cellsize_limit_f*DD_CELL_MARGIN2/DD_CELL_MARGIN)
 +    {
 +        char buf[22];
 +        gmx_fatal(FARGS,"Step %s: the dynamic load balancing could not balance dimension %c: box size %f, triclinic skew factor %f, #cells %d, minimum cell size %f\n",
 +                  gmx_step_str(step,buf),
 +                  dim2char(dim),ddbox->box_size[dim],ddbox->skew_fac[dim],
 +                  ncd,comm->cellsize_min[dim]);
 +    }
 +    
 +    root->bLimited = (nmin > 0) || (range[0]>0) || (range[1]<ncd);
 +    
 +    if (!bUniform)
 +    {
 +        /* Check if the boundary did not displace more than halfway
 +         * each of the cells it bounds, as this could cause problems,
 +         * especially when the differences between cell sizes are large.
 +         * If changes are applied, they will not make cells smaller
 +         * than the cut-off, as we check all the boundaries which
 +         * might be affected by a change and if the old state was ok,
 +         * the cells will at most be shrunk back to their old size.
 +         */
 +        for(i=range[0]+1; i<range[1]; i++)
 +        {
 +            halfway = 0.5*(root->old_cell_f[i] + root->old_cell_f[i-1]);
 +            if (root->cell_f[i] < halfway)
 +            {
 +                root->cell_f[i] = halfway;
 +                /* Check if the change also causes shifts of the next boundaries */
 +                for(j=i+1; j<range[1]; j++)
 +                {
 +                    if (root->cell_f[j] < root->cell_f[j-1] + cellsize_limit_f)
 +                        root->cell_f[j] =  root->cell_f[j-1] + cellsize_limit_f;
 +                }
 +            }
 +            halfway = 0.5*(root->old_cell_f[i] + root->old_cell_f[i+1]);
 +            if (root->cell_f[i] > halfway)
 +            {
 +                root->cell_f[i] = halfway;
 +                /* Check if the change also causes shifts of the next boundaries */
 +                for(j=i-1; j>=range[0]+1; j--)
 +                {
 +                    if (root->cell_f[j] > root->cell_f[j+1] - cellsize_limit_f)
 +                        root->cell_f[j] = root->cell_f[j+1] - cellsize_limit_f;
 +                }
 +            }
 +        }
 +    }
 +    
 +    /* nrange is defined as [lower, upper) range for new call to enforce_limits */
 +    /* find highest violation of LimLo (a) and the following violation of LimHi (thus the lowest following) (b)
 +     * then call enforce_limits for (oldb,a), (a,b). In the next step: (b,nexta). oldb and nexta can be the boundaries.
 +     * for a and b nrange is used */
 +    if (d > 0)
 +    {
 +        /* Take care of the staggering of the cell boundaries */
 +        if (bUniform)
 +        {
 +            for(i=range[0]; i<range[1]; i++)
 +            {
 +                root->cell_f_max0[i] = root->cell_f[i];
 +                root->cell_f_min1[i] = root->cell_f[i+1];
 +            }
 +        }
 +        else
 +        {
 +            for(i=range[0]+1; i<range[1]; i++)
 +            {
 +                bLimLo = (root->cell_f[i] < root->bound_min[i]);
 +                bLimHi = (root->cell_f[i] > root->bound_max[i]);
 +                if (bLimLo && bLimHi)
 +                {
 +                    /* Both limits violated, try the best we can */
 +                    /* For this case we split the original range (range) in two parts and care about the other limitiations in the next iteration. */
 +                    root->cell_f[i] = 0.5*(root->bound_min[i] + root->bound_max[i]);
 +                    nrange[0]=range[0];
 +                    nrange[1]=i;
 +                    dd_cell_sizes_dlb_root_enforce_limits(dd, d, dim, root, ddbox, bUniform, step, cellsize_limit_f, nrange);
 +
 +                    nrange[0]=i;
 +                    nrange[1]=range[1];
 +                    dd_cell_sizes_dlb_root_enforce_limits(dd, d, dim, root, ddbox, bUniform, step, cellsize_limit_f, nrange);
 +
 +                    return;
 +                }
 +                else if (bLimLo)
 +                {
 +                    /* root->cell_f[i] = root->bound_min[i]; */
 +                    nrange[1]=i;  /* only store violation location. There could be a LimLo violation following with an higher index */
 +                    bLastHi=FALSE;
 +                }
 +                else if (bLimHi && !bLastHi)
 +                {
 +                    bLastHi=TRUE;
 +                    if (nrange[1] < range[1])   /* found a LimLo before */
 +                    {
 +                        root->cell_f[nrange[1]] = root->bound_min[nrange[1]];
 +                        dd_cell_sizes_dlb_root_enforce_limits(dd, d, dim, root, ddbox, bUniform, step, cellsize_limit_f, nrange);
 +                        nrange[0]=nrange[1];
 +                    }
 +                    root->cell_f[i] = root->bound_max[i];
 +                    nrange[1]=i; 
 +                    dd_cell_sizes_dlb_root_enforce_limits(dd, d, dim, root, ddbox, bUniform, step, cellsize_limit_f, nrange);
 +                    nrange[0]=i;
 +                    nrange[1]=range[1];
 +                }
 +            }
 +            if (nrange[1] < range[1])   /* found last a LimLo */
 +            {
 +                root->cell_f[nrange[1]] = root->bound_min[nrange[1]];
 +                dd_cell_sizes_dlb_root_enforce_limits(dd, d, dim, root, ddbox, bUniform, step, cellsize_limit_f, nrange);
 +                nrange[0]=nrange[1];
 +                nrange[1]=range[1];
 +                dd_cell_sizes_dlb_root_enforce_limits(dd, d, dim, root, ddbox, bUniform, step, cellsize_limit_f, nrange);
 +            } 
 +            else if (nrange[0] > range[0]) /* found at least one LimHi */
 +            {
 +                dd_cell_sizes_dlb_root_enforce_limits(dd, d, dim, root, ddbox, bUniform, step, cellsize_limit_f, nrange);
 +            }
 +        }
 +    }
 +}
 +
 +
 +/* Computes new relative cell boundaries for one DD dimension on the
 + * row-root rank, using the measured per-cell loads with underrelaxation.
 + * Enforces minimum cell sizes and staggering limits via
 + * dd_cell_sizes_dlb_root_enforce_limits, then appends to root->cell_f
 + * the lower-dimension boundaries and the PME maxshift values for
 + * broadcasting to the row.
 + */
 +static void set_dd_cell_sizes_dlb_root(gmx_domdec_t *dd,
 +                                       int d,int dim,gmx_domdec_root_t *root,
 +                                       gmx_ddbox_t *ddbox,gmx_bool bDynamicBox,
 +                                       gmx_bool bUniform,gmx_large_int_t step)
 +{
 +    gmx_domdec_comm_t *comm;
 +    int  ncd,d1,i,j,pos;
 +    real *cell_size;
 +    real load_aver,load_i,imbalance,change,change_max,sc;
 +    real cellsize_limit_f,dist_min_f,dist_min_f_hard,space;
 +    real change_limit;
 +    real relax = 0.5;
 +    gmx_bool bPBC;
 +    int range[] = { 0, 0 };
 +
 +    comm = dd->comm;
 +
 +    /* Convert the maximum change from the input percentage to a fraction */
 +    change_limit = comm->dlb_scale_lim*0.01;
 +
 +    ncd = dd->nc[dim];
 +
 +    bPBC = (dim < ddbox->npbcdim);
 +
 +    cell_size = root->buf_ncd;
 +
 +    /* Store the original boundaries */
 +    for(i=0; i<ncd+1; i++)
 +    {
 +        root->old_cell_f[i] = root->cell_f[i];
 +    }
 +    if (bUniform) {
 +        for(i=0; i<ncd; i++)
 +        {
 +            cell_size[i] = 1.0/ncd;
 +        }
 +    }
 +    else if (dd_load_count(comm))
 +    {
 +        load_aver = comm->load[d].sum_m/ncd;
 +        change_max = 0;
 +        for(i=0; i<ncd; i++)
 +        {
 +            /* Determine the relative imbalance of cell i */
 +            load_i = comm->load[d].load[i*comm->load[d].nload+2];
 +            imbalance = (load_i - load_aver)/(load_aver>0 ? load_aver : 1);
 +            /* Determine the change of the cell size using underrelaxation */
 +            change = -relax*imbalance;
 +            change_max = max(change_max,max(change,-change));
 +        }
 +        /* Limit the amount of scaling.
 +         * We need to use the same rescaling for all cells in one row,
 +         * otherwise the load balancing might not converge.
 +         */
 +        sc = relax;
 +        if (change_max > change_limit)
 +        {
 +            sc *= change_limit/change_max;
 +        }
 +        for(i=0; i<ncd; i++)
 +        {
 +            /* Determine the relative imbalance of cell i */
 +            load_i = comm->load[d].load[i*comm->load[d].nload+2];
 +            imbalance = (load_i - load_aver)/(load_aver>0 ? load_aver : 1);
 +            /* Determine the change of the cell size using underrelaxation */
 +            change = -sc*imbalance;
 +            cell_size[i] = (root->cell_f[i+1]-root->cell_f[i])*(1 + change);
 +        }
 +    }
 +    
 +    /* Relative (fraction of box) lower limits on cell size and on the
 +     * distance between staggered boundaries.
 +     */
 +    cellsize_limit_f  = comm->cellsize_min[dim]/ddbox->box_size[dim];
 +    cellsize_limit_f *= DD_CELL_MARGIN;
 +    dist_min_f_hard   = grid_jump_limit(comm,comm->cutoff,d)/ddbox->box_size[dim];
 +    dist_min_f        = dist_min_f_hard * DD_CELL_MARGIN;
 +    if (ddbox->tric_dir[dim])
 +    {
 +        cellsize_limit_f /= ddbox->skew_fac[dim];
 +        dist_min_f       /= ddbox->skew_fac[dim];
 +    }
 +    if (bDynamicBox && d > 0)
 +    {
 +        dist_min_f *= DD_PRES_SCALE_MARGIN;
 +    }
 +    if (d > 0 && !bUniform)
 +    {
 +        /* Make sure that the grid is not shifted too much */
 +        for(i=1; i<ncd; i++) {
 +            if (root->cell_f_min1[i] - root->cell_f_max0[i-1] < 2 * dist_min_f_hard) 
 +            {
 +                gmx_incons("Inconsistent DD boundary staggering limits!");
 +            }
 +            root->bound_min[i] = root->cell_f_max0[i-1] + dist_min_f;
 +            space = root->cell_f[i] - (root->cell_f_max0[i-1] + dist_min_f);
 +            if (space > 0) {
 +                root->bound_min[i] += 0.5*space;
 +            }
 +            root->bound_max[i] = root->cell_f_min1[i] - dist_min_f;
 +            space = root->cell_f[i] - (root->cell_f_min1[i] - dist_min_f);
 +            if (space < 0) {
 +                root->bound_max[i] += 0.5*space;
 +            }
 +            if (debug)
 +            {
 +                fprintf(debug,
 +                        "dim %d boundary %d %.3f < %.3f < %.3f < %.3f < %.3f\n",
 +                        d,i,
 +                        root->cell_f_max0[i-1] + dist_min_f,
 +                        root->bound_min[i],root->cell_f[i],root->bound_max[i],
 +                        root->cell_f_min1[i] - dist_min_f);
 +            }
 +        }
 +    }
 +    range[1]=ncd;
 +    root->cell_f[0] = 0;
 +    root->cell_f[ncd] = 1;
 +    dd_cell_sizes_dlb_root_enforce_limits(dd, d, dim, root, ddbox, bUniform, step, cellsize_limit_f, range);
 +
 +
 +    /* After the checks above, the cells should obey the cut-off
 +     * restrictions, but it does not hurt to check.
 +     */
 +    for(i=0; i<ncd; i++)
 +    {
 +        if (debug)
 +        {
 +            fprintf(debug,"Relative bounds dim %d  cell %d: %f %f\n",
 +                    dim,i,root->cell_f[i],root->cell_f[i+1]);
 +        }
 +
 +        if ((bPBC || (i != 0 && i != dd->nc[dim]-1)) &&
 +            root->cell_f[i+1] - root->cell_f[i] <
 +            cellsize_limit_f/DD_CELL_MARGIN)
 +        {
 +            char buf[22];
 +            fprintf(stderr,
 +                    "\nWARNING step %s: direction %c, cell %d too small: %f\n",
 +                    gmx_step_str(step,buf),dim2char(dim),i,
 +                    (root->cell_f[i+1] - root->cell_f[i])
 +                    *ddbox->box_size[dim]*ddbox->skew_fac[dim]);
 +        }
 +    }
 +    
 +    pos = ncd + 1;
 +    /* Store the cell boundaries of the lower dimensions at the end */
 +    for(d1=0; d1<d; d1++)
 +    {
 +        root->cell_f[pos++] = comm->cell_f0[d1];
 +        root->cell_f[pos++] = comm->cell_f1[d1];
 +    }
 +    
 +    if (d < comm->npmedecompdim)
 +    {
 +        /* The master determines the maximum shift for
 +         * the coordinate communication between separate PME nodes.
 +         */
 +        set_pme_maxshift(dd,&comm->ddpme[d],bUniform,ddbox,root->cell_f);
 +    }
 +    /* Append the maxshift values (as reals) for broadcasting */
 +    root->cell_f[pos++] = comm->ddpme[0].maxshift;
 +    if (d >= 1)
 +    {
 +        root->cell_f[pos++] = comm->ddpme[1].maxshift;
 +    }
 +}    
 +
 +/* Converts this rank's relative cell boundaries (cell_f0/cell_f1) for
 + * decomposition index dimind into absolute coordinates (cell_x0/cell_x1),
 + * adding the box origin for unbounded dimensions.
 + */
 +static void relative_to_absolute_cell_bounds(gmx_domdec_t *dd,
 +                                             gmx_ddbox_t *ddbox,int dimind)
 +{
 +    gmx_domdec_comm_t *comm;
 +    int dim;
 +
 +    comm = dd->comm;
 +
 +    /* Set the cell dimensions */
 +    dim = dd->dim[dimind];
 +    comm->cell_x0[dim] = comm->cell_f0[dimind]*ddbox->box_size[dim];
 +    comm->cell_x1[dim] = comm->cell_f1[dimind]*ddbox->box_size[dim];
 +    if (dim >= ddbox->nboundeddim)
 +    {
 +        comm->cell_x0[dim] += ddbox->box0[dim];
 +        comm->cell_x1[dim] += ddbox->box0[dim];
 +    }
 +}
 +
 +/* Broadcasts the row of cell fractions computed by the row root over the
 + * load-balancing communicator, then extracts this rank's own boundary
 + * fractions, the lower-dimension fractions, and the PME maxshift values
 + * from the received buffer.
 + */
 +static void distribute_dd_cell_sizes_dlb(gmx_domdec_t *dd,
 +                                         int d,int dim,real *cell_f_row,
 +                                         gmx_ddbox_t *ddbox)
 +{
 +    gmx_domdec_comm_t *comm;
 +    int d1,dim1,pos;
 +
 +    comm = dd->comm;
 +
 +#ifdef GMX_MPI
 +    /* Each node would only need to know two fractions,
 +     * but it is probably cheaper to broadcast the whole array.
 +     */
 +    MPI_Bcast(cell_f_row,DD_CELL_F_SIZE(dd,d)*sizeof(real),MPI_BYTE,
 +              0,comm->mpi_comm_load[d]);
 +#endif
 +    /* Copy the fractions for this dimension from the buffer */
 +    comm->cell_f0[d] = cell_f_row[dd->ci[dim]  ];
 +    comm->cell_f1[d] = cell_f_row[dd->ci[dim]+1];
 +    /* The whole array was communicated, so set the buffer position */
 +    pos = dd->nc[dim] + 1;
 +    for(d1=0; d1<=d; d1++)
 +    {
 +        if (d1 < d)
 +        {
 +            /* Copy the cell fractions of the lower dimensions */
 +            comm->cell_f0[d1] = cell_f_row[pos++];
 +            comm->cell_f1[d1] = cell_f_row[pos++];
 +        }
 +        relative_to_absolute_cell_bounds(dd,ddbox,d1);
 +    }
 +    /* Convert the communicated shift from float to int */
 +    comm->ddpme[0].maxshift = (int)(cell_f_row[pos++] + 0.5);
 +    if (d >= 1)
 +    {
 +        comm->ddpme[1].maxshift = (int)(cell_f_row[pos++] + 0.5);
 +    }
 +}
 +
 +/* Recomputes the DLB cell boundaries for all DD dimensions. For each
 + * dimension, the row root computes the new boundaries and all row
 + * members receive them via distribute_dd_cell_sizes_dlb. Row membership
 + * is derived from this rank's grid coordinates in the higher dimensions.
 + */
 +static void set_dd_cell_sizes_dlb_change(gmx_domdec_t *dd,
 +                                         gmx_ddbox_t *ddbox,gmx_bool bDynamicBox,
 +                                         gmx_bool bUniform,gmx_large_int_t step)
 +{
 +    gmx_domdec_comm_t *comm;
 +    int d,dim,d1;
 +    gmx_bool bRowMember,bRowRoot;
 +    real *cell_f_row;
 +    
 +    comm = dd->comm;
 +
 +    for(d=0; d<dd->ndim; d++)
 +    {
 +        dim = dd->dim[d];
 +        bRowMember = TRUE;
 +        bRowRoot = TRUE;
 +        for(d1=d; d1<dd->ndim; d1++)
 +        {
 +            if (dd->ci[dd->dim[d1]] > 0)
 +            {
 +                if (d1 > d)
 +                {
 +                    bRowMember = FALSE;
 +                }
 +                bRowRoot = FALSE;
 +            }
 +        }
 +        if (bRowMember)
 +        {
 +            if (bRowRoot)
 +            {
 +                set_dd_cell_sizes_dlb_root(dd,d,dim,comm->root[d],
 +                                           ddbox,bDynamicBox,bUniform,step);
 +                cell_f_row = comm->root[d]->cell_f;
 +            }
 +            else
 +            {
 +                cell_f_row = comm->cell_f_row;
 +            }
 +            distribute_dd_cell_sizes_dlb(dd,d,dim,cell_f_row,ddbox);
 +        }
 +    }
 +}    
 +
 +/* Recomputes absolute cell boundaries from the unchanged relative
 + * fractions for all DD dimensions.
 + */
 +static void set_dd_cell_sizes_dlb_nochange(gmx_domdec_t *dd,gmx_ddbox_t *ddbox)
 +{
 +    int d;
 +
 +    /* This function assumes the box is static and should therefore
 +     * not be called when the box has changed since the last
 +     * call to dd_partition_system.
 +     */
 +    for(d=0; d<dd->ndim; d++)
 +    {
 +        relative_to_absolute_cell_bounds(dd,ddbox,d); 
 +    }
 +}
 +
 +
 +
 +/* Entry point for setting cell sizes with dynamic load balancing:
 + * recomputes the boundaries when bDoDLB is set (timed as DD boundary
 + * communication), otherwise only rescales them when the box changed.
 + * Dimensions without decomposition span the whole box.
 + */
 +static void set_dd_cell_sizes_dlb(gmx_domdec_t *dd,
 +                                  gmx_ddbox_t *ddbox,gmx_bool bDynamicBox,
 +                                  gmx_bool bUniform,gmx_bool bDoDLB,gmx_large_int_t step,
 +                                  gmx_wallcycle_t wcycle)
 +{
 +    gmx_domdec_comm_t *comm;
 +    int dim;
 +
 +    comm = dd->comm;
 +    
 +    if (bDoDLB)
 +    {
 +        wallcycle_start(wcycle,ewcDDCOMMBOUND);
 +        set_dd_cell_sizes_dlb_change(dd,ddbox,bDynamicBox,bUniform,step);
 +        wallcycle_stop(wcycle,ewcDDCOMMBOUND);
 +    }
 +    else if (bDynamicBox)
 +    {
 +        set_dd_cell_sizes_dlb_nochange(dd,ddbox);
 +    }
 +    
 +    /* Set the dimensions for which no DD is used */
 +    for(dim=0; dim<DIM; dim++) {
 +        if (dd->nc[dim] == 1) {
 +            comm->cell_x0[dim] = 0;
 +            comm->cell_x1[dim] = ddbox->box_size[dim];
 +            if (dim >= ddbox->nboundeddim)
 +            {
 +                comm->cell_x0[dim] += ddbox->box0[dim];
 +                comm->cell_x1[dim] += ddbox->box0[dim];
 +            }
 +        }
 +    }
 +}
 +
 +/* Ensures the communication index arrays in dd->comm->cd can hold
 + * npulse pulses per decomposed dimension, reallocating and initializing
 + * new entries as needed, and records the current pulse counts.
 + */
 +static void realloc_comm_ind(gmx_domdec_t *dd,ivec npulse)
 +{
 +    int d,np,i;
 +    gmx_domdec_comm_dim_t *cd;
 +    
 +    for(d=0; d<dd->ndim; d++)
 +    {
 +        cd = &dd->comm->cd[d];
 +        np = npulse[dd->dim[d]];
 +        if (np > cd->np_nalloc)
 +        {
 +            if (debug)
 +            {
 +                fprintf(debug,"(Re)allocing cd for %c to %d pulses\n",
 +                        dim2char(dd->dim[d]),np);
 +            }
 +            if (DDMASTER(dd) && cd->np_nalloc > 0)
 +            {
 +                fprintf(stderr,"\nIncreasing the number of cell to communicate in dimension %c to %d for the first time\n",dim2char(dd->dim[d]),np);
 +            }
 +            srenew(cd->ind,np);
 +            /* Initialize the newly allocated pulse entries */
 +            for(i=cd->np_nalloc; i<np; i++)
 +            {
 +                cd->ind[i].index  = NULL;
 +                cd->ind[i].nalloc = 0;
 +            }
 +            cd->np_nalloc = np;
 +        }
 +        cd->np = np;
 +    }
 +}
 +
 +
 +/* Top-level cell size update: saves the old boundaries for the later
 + * charge-group displacement check, then dispatches to the dynamic
 + * (DLB) or static (SLB) cell sizing path.
 + */
 +static void set_dd_cell_sizes(gmx_domdec_t *dd,
 +                              gmx_ddbox_t *ddbox,gmx_bool bDynamicBox,
 +                              gmx_bool bUniform,gmx_bool bDoDLB,gmx_large_int_t step,
 +                              gmx_wallcycle_t wcycle)
 +{
 +    gmx_domdec_comm_t *comm;
 +    int  d;
 +    ivec npulse;
 +    
 +    comm = dd->comm;
 +
 +    /* Copy the old cell boundaries for the cg displacement check */
 +    copy_rvec(comm->cell_x0,comm->old_cell_x0);
 +    copy_rvec(comm->cell_x1,comm->old_cell_x1);
 +    
 +    if (comm->bDynLoadBal)
 +    {
 +        if (DDMASTER(dd))
 +        {
 +            check_box_size(dd,ddbox);
 +        }
 +        set_dd_cell_sizes_dlb(dd,ddbox,bDynamicBox,bUniform,bDoDLB,step,wcycle);
 +    }
 +    else
 +    {
 +        set_dd_cell_sizes_slb(dd,ddbox,FALSE,npulse);
 +        realloc_comm_ind(dd,npulse);
 +    }
 +    
 +    if (debug)
 +    {
 +        for(d=0; d<DIM; d++)
 +        {
 +            fprintf(debug,"cell_x[%d] %f - %f skew_fac %f\n",
 +                    d,comm->cell_x0[d],comm->cell_x1[d],ddbox->skew_fac[d]);
 +        }
 +    }
 +}
 +
 +/* Validates the DLB cell sizes against the minimum allowed size and,
 + * when required for staggered grids or unbounded dimensions,
 + * communicates the cell boundaries used for neighbor searching
 + * (cell_ns_x0/cell_ns_x1) and checks for excessive grid jumps.
 + */
 +static void comm_dd_ns_cell_sizes(gmx_domdec_t *dd,
 +                                  gmx_ddbox_t *ddbox,
 +                                  rvec cell_ns_x0,rvec cell_ns_x1,
 +                                  gmx_large_int_t step)
 +{
 +    gmx_domdec_comm_t *comm;
 +    int dim_ind,dim;
 +    
 +    comm = dd->comm;
 +
 +    for(dim_ind=0; dim_ind<dd->ndim; dim_ind++)
 +    {
 +        dim = dd->dim[dim_ind];
 +        
 +        /* Without PBC we don't have restrictions on the outer cells */
 +        if (!(dim >= ddbox->npbcdim && 
 +              (dd->ci[dim] == 0 || dd->ci[dim] == dd->nc[dim] - 1)) &&
 +            comm->bDynLoadBal &&
 +            (comm->cell_x1[dim] - comm->cell_x0[dim])*ddbox->skew_fac[dim] <
 +            comm->cellsize_min[dim])
 +        {
 +            char buf[22];
 +            gmx_fatal(FARGS,"Step %s: The %c-size (%f) times the triclinic skew factor (%f) is smaller than the smallest allowed cell size (%f) for domain decomposition grid cell %d %d %d",
 +                      gmx_step_str(step,buf),dim2char(dim),
 +                      comm->cell_x1[dim] - comm->cell_x0[dim],
 +                      ddbox->skew_fac[dim],
 +                      dd->comm->cellsize_min[dim],
 +                      dd->ci[XX],dd->ci[YY],dd->ci[ZZ]);
 +        }
 +    }
 +    
 +    if ((dd->bGridJump && dd->ndim > 1) || ddbox->nboundeddim < DIM)
 +    {
 +        /* Communicate the boundaries and update cell_ns_x0/1 */
 +        dd_move_cellx(dd,ddbox,cell_ns_x0,cell_ns_x1);
 +        if (dd->bGridJump && dd->ndim > 1)
 +        {
 +            /* bFatal=TRUE: a too large jump is a fatal error here */
 +            check_grid_jump(step,dd,dd->comm->cutoff,ddbox,TRUE);
 +        }
 +    }
 +}
 +
 +/* Fill tcm with the triclinic correction factors for box.
 + * tcm[j][d] gives the contribution of the cartesian j-component of a
 + * position to its lattice coordinate along dimension d; dimensions
 + * without pbc (>= npbcdim) get zero correction.
 + */
 +static void make_tric_corr_matrix(int npbcdim,matrix box,matrix tcm)
 +{
 +    if (YY < npbcdim)
 +    {
 +        tcm[YY][XX] = -box[YY][XX]/box[YY][YY];
 +    }
 +    else
 +    {
 +        tcm[YY][XX] = 0;
 +    }
 +    if (ZZ < npbcdim)
 +    {
 +        tcm[ZZ][XX] = -(box[ZZ][YY]*tcm[YY][XX] + box[ZZ][XX])/box[ZZ][ZZ];
 +        tcm[ZZ][YY] = -box[ZZ][YY]/box[ZZ][ZZ];
 +    }
 +    else
 +    {
 +        tcm[ZZ][XX] = 0;
 +        tcm[ZZ][YY] = 0;
 +    }
 +}
 +
 +/* Verify that the unit cell is compatible with screw pbc;
 + * issues a fatal error when it is not.
 + */
 +static void check_screw_box(matrix box)
 +{
 +    /* Mathematical limitation */
 +    if (box[YY][XX] != 0 || box[ZZ][XX] != 0)
 +    {
 +        gmx_fatal(FARGS,"With screw pbc the unit cell can not have non-zero off-diagonal x-components");
 +    }
 +    
 +    /* Limitation due to the asymmetry of the eighth shell method */
 +    if (box[ZZ][YY] != 0)
 +    {
 +        gmx_fatal(FARGS,"pbc=screw with non-zero box_zy is not supported");
 +    }
 +}
 +
 +static void distribute_cg(FILE *fplog,gmx_large_int_t step,
 +                          matrix box,ivec tric_dir,t_block *cgs,rvec pos[],
 +                          gmx_domdec_t *dd)
 +{
 +    gmx_domdec_master_t *ma;
 +    int **tmp_ind=NULL,*tmp_nalloc=NULL;
 +    int  i,icg,j,k,k0,k1,d,npbcdim;
 +    matrix tcm;
 +    rvec box_size,cg_cm;
 +    ivec ind;
 +    real nrcg,inv_ncg,pos_d;
 +    atom_id *cgindex;
 +    gmx_bool bUnbounded,bScrew;
 +
 +    ma = dd->ma;
 +    
 +    if (tmp_ind == NULL)
 +    {
 +        snew(tmp_nalloc,dd->nnodes);
 +        snew(tmp_ind,dd->nnodes);
 +        for(i=0; i<dd->nnodes; i++)
 +        {
 +            tmp_nalloc[i] = over_alloc_large(cgs->nr/dd->nnodes+1);
 +            snew(tmp_ind[i],tmp_nalloc[i]);
 +        }
 +    }
 +    
 +    /* Clear the count */
 +    for(i=0; i<dd->nnodes; i++)
 +    {
 +        ma->ncg[i] = 0;
 +        ma->nat[i] = 0;
 +    }
 +    
 +    make_tric_corr_matrix(dd->npbcdim,box,tcm);
 +    
 +    cgindex = cgs->index;
 +    
 +    /* Compute the center of geometry for all charge groups */
 +    for(icg=0; icg<cgs->nr; icg++)
 +    {
 +        k0      = cgindex[icg];
 +        k1      = cgindex[icg+1];
 +        nrcg    = k1 - k0;
 +        if (nrcg == 1)
 +        {
 +            copy_rvec(pos[k0],cg_cm);
 +        }
 +        else
 +        {
 +            inv_ncg = 1.0/nrcg;
 +            
 +            clear_rvec(cg_cm);
 +            for(k=k0; (k<k1); k++)
 +            {
 +                rvec_inc(cg_cm,pos[k]);
 +            }
 +            for(d=0; (d<DIM); d++)
 +            {
 +                cg_cm[d] *= inv_ncg;
 +            }
 +        }
 +        /* Put the charge group in the box and determine the cell index */
 +        for(d=DIM-1; d>=0; d--) {
 +            pos_d = cg_cm[d];
 +            if (d < dd->npbcdim)
 +            {
 +                bScrew = (dd->bScrewPBC && d == XX);
 +                if (tric_dir[d] && dd->nc[d] > 1)
 +                {
 +                    /* Use triclinic coordintates for this dimension */
 +                    for(j=d+1; j<DIM; j++)
 +                    {
 +                        pos_d += cg_cm[j]*tcm[j][d];
 +                    }
 +                }
 +                while(pos_d >= box[d][d])
 +                {
 +                    pos_d -= box[d][d];
 +                    rvec_dec(cg_cm,box[d]);
 +                    if (bScrew)
 +                    {
 +                        cg_cm[YY] = box[YY][YY] - cg_cm[YY];
 +                        cg_cm[ZZ] = box[ZZ][ZZ] - cg_cm[ZZ];
 +                    }
 +                    for(k=k0; (k<k1); k++)
 +                    {
 +                        rvec_dec(pos[k],box[d]);
 +                        if (bScrew)
 +                        {
 +                            pos[k][YY] = box[YY][YY] - pos[k][YY];
 +                            pos[k][ZZ] = box[ZZ][ZZ] - pos[k][ZZ];
 +                        }
 +                    }
 +                }
 +                while(pos_d < 0)
 +                {
 +                    pos_d += box[d][d];
 +                    rvec_inc(cg_cm,box[d]);
 +                    if (bScrew)
 +                    {
 +                        cg_cm[YY] = box[YY][YY] - cg_cm[YY];
 +                        cg_cm[ZZ] = box[ZZ][ZZ] - cg_cm[ZZ];
 +                    }
 +                    for(k=k0; (k<k1); k++)
 +                    {
 +                        rvec_inc(pos[k],box[d]);
 +                        if (bScrew) {
 +                            pos[k][YY] = box[YY][YY] - pos[k][YY];
 +                            pos[k][ZZ] = box[ZZ][ZZ] - pos[k][ZZ];
 +                        }
 +                    }
 +                }
 +            }
 +            /* This could be done more efficiently */
 +            ind[d] = 0;
 +            while(ind[d]+1 < dd->nc[d] && pos_d >= ma->cell_x[d][ind[d]+1])
 +            {
 +                ind[d]++;
 +            }
 +        }
 +        i = dd_index(dd->nc,ind);
 +        if (ma->ncg[i] == tmp_nalloc[i])
 +        {
 +            tmp_nalloc[i] = over_alloc_large(ma->ncg[i]+1);
 +            srenew(tmp_ind[i],tmp_nalloc[i]);
 +        }
 +        tmp_ind[i][ma->ncg[i]] = icg;
 +        ma->ncg[i]++;
 +        ma->nat[i] += cgindex[icg+1] - cgindex[icg];
 +    }
 +    
 +    k1 = 0;
 +    for(i=0; i<dd->nnodes; i++)
 +    {
 +        ma->index[i] = k1;
 +        for(k=0; k<ma->ncg[i]; k++)
 +        {
 +            ma->cg[k1++] = tmp_ind[i][k];
 +        }
 +    }
 +    ma->index[dd->nnodes] = k1;
 +    
 +    for(i=0; i<dd->nnodes; i++)
 +    {
 +        sfree(tmp_ind[i]);
 +    }
 +    sfree(tmp_ind);
 +    sfree(tmp_nalloc);
 +    
 +    if (fplog)
 +    {
 +        char buf[22];
 +        fprintf(fplog,"Charge group distribution at step %s:",
 +                gmx_step_str(step,buf));
 +        for(i=0; i<dd->nnodes; i++)
 +        {
 +            fprintf(fplog," %d",ma->ncg[i]);
 +        }
 +        fprintf(fplog,"\n");
 +    }
 +}
 +
 +/* Distribute the charge groups over the DD nodes.
 + * The master computes the assignment (distribute_cg) and scatters the
 + * per-node counts and global cg indices; every node then fills in its
 + * home counts (ncg_home, nat_home) and local cg index dd->cgindex.
 + */
 +static void get_cg_distribution(FILE *fplog,gmx_large_int_t step,gmx_domdec_t *dd,
 +                                t_block *cgs,matrix box,gmx_ddbox_t *ddbox,
 +                                rvec pos[])
 +{
 +    gmx_domdec_master_t *ma=NULL;
 +    ivec npulse;
 +    int  i,cg_gl;
 +    int  *ibuf,buf2[2] = { 0, 0 };
 +    gmx_bool bMaster = DDMASTER(dd);
 +    if (bMaster)
 +    {
 +        ma = dd->ma;
 +        
 +        if (dd->bScrewPBC)
 +        {
 +            check_screw_box(box);
 +        }
 +    
 +        set_dd_cell_sizes_slb(dd,ddbox,TRUE,npulse);
 +    
 +        distribute_cg(fplog,step,box,ddbox->tric_dir,cgs,pos,dd);
 +        /* Pack (ncg,nat) pairs for the scatter below */
 +        for(i=0; i<dd->nnodes; i++)
 +        {
 +            ma->ibuf[2*i]   = ma->ncg[i];
 +            ma->ibuf[2*i+1] = ma->nat[i];
 +        }
 +        ibuf = ma->ibuf;
 +    }
 +    else
 +    {
 +        ibuf = NULL;
 +    }
 +    /* Every node receives its own (ncg,nat) pair in buf2 */
 +    dd_scatter(dd,2*sizeof(int),ibuf,buf2);
 +    
 +    dd->ncg_home = buf2[0];
 +    dd->nat_home = buf2[1];
 +    dd->ncg_tot  = dd->ncg_home;
 +    dd->nat_tot  = dd->nat_home;
 +    if (dd->ncg_home > dd->cg_nalloc || dd->cg_nalloc == 0)
 +    {
 +        dd->cg_nalloc = over_alloc_dd(dd->ncg_home);
 +        srenew(dd->index_gl,dd->cg_nalloc);
 +        srenew(dd->cgindex,dd->cg_nalloc+1);
 +    }
 +    if (bMaster)
 +    {
 +        /* Byte counts and displacements for the variable-size scatter */
 +        for(i=0; i<dd->nnodes; i++)
 +        {
 +            ma->ibuf[i] = ma->ncg[i]*sizeof(int);
 +            ma->ibuf[dd->nnodes+i] = ma->index[i]*sizeof(int);
 +        }
 +    }
 +    
 +    /* Scatter the global cg indices to dd->index_gl on each node */
 +    dd_scatterv(dd,
 +                DDMASTER(dd) ? ma->ibuf : NULL,
 +                DDMASTER(dd) ? ma->ibuf+dd->nnodes : NULL,
 +                DDMASTER(dd) ? ma->cg : NULL,
 +                dd->ncg_home*sizeof(int),dd->index_gl);
 +    
 +    /* Determine the home charge group sizes */
 +    dd->cgindex[0] = 0;
 +    for(i=0; i<dd->ncg_home; i++)
 +    {
 +        cg_gl = dd->index_gl[i];
 +        dd->cgindex[i+1] =
 +            dd->cgindex[i] + cgs->index[cg_gl+1] - cgs->index[cg_gl];
 +    }
 +    
 +    if (debug)
 +    {
 +        fprintf(debug,"Home charge groups:\n");
 +        for(i=0; i<dd->ncg_home; i++)
 +        {
 +            fprintf(debug," %d",dd->index_gl[i]);
 +            if (i % 10 == 9) 
 +                fprintf(debug,"\n");
 +        }
 +        fprintf(debug,"\n");
 +    }
 +}
 +
 +/* Copy the per-atom rvecs of moving charge groups (move[icg] >= 0) into
 + * the per-direction communication buffers at vector slot vec out of
 + * nvec, and optionally compact the staying entries of src in place.
 + * The buffer layout per cg is: 1 cg_cm entry (written by
 + * compact_and_copy_vec_cg) followed by nvec blocks of nrcg rvecs.
 + * Returns the number of home atoms after (optional) compaction.
 + */
 +static int compact_and_copy_vec_at(int ncg,int *move,
 +                                   int *cgindex,
 +                                   int nvec,int vec,
 +                                   rvec *src,gmx_domdec_comm_t *comm,
 +                                   gmx_bool bCompact)
 +{
 +    int m,icg,i,i0,i1,nrcg;
 +    int home_pos;
 +    int pos_vec[DIM*2];
 +    
 +    home_pos = 0;
 +
 +    /* Write positions per communication direction */
 +    for(m=0; m<DIM*2; m++)
 +    {
 +        pos_vec[m] = 0;
 +    }
 +    
 +    i0 = 0;
 +    for(icg=0; icg<ncg; icg++)
 +    {
 +        i1 = cgindex[icg+1];
 +        m = move[icg];
 +        if (m == -1)
 +        {
 +            if (bCompact)
 +            {
 +                /* Compact the home array in place */
 +                for(i=i0; i<i1; i++)
 +                {
 +                    copy_rvec(src[i],src[home_pos++]);
 +                }
 +            }
 +        }
 +        else
 +        {
 +            /* Copy to the communication buffer */
 +            nrcg = i1 - i0;
 +            /* Skip the cg_cm entry and the vectors before slot vec */
 +            pos_vec[m] += 1 + vec*nrcg;
 +            for(i=i0; i<i1; i++)
 +            {
 +                copy_rvec(src[i],comm->cgcm_state[m][pos_vec[m]++]);
 +            }
 +            /* Skip the remaining vector slots of this cg */
 +            pos_vec[m] += (nvec - vec - 1)*nrcg;
 +        }
 +        if (!bCompact)
 +        {
 +            home_pos += i1 - i0;
 +        }
 +        i0 = i1;
 +    }
 +    
 +    return home_pos;
 +}
 +
 +/* Copy the center-of-geometry rvec of each moving charge group
 + * (move[icg] >= 0) into the first slot of its entry in the per-direction
 + * communication buffer, leaving room for nvec per-atom vector blocks
 + * (filled by compact_and_copy_vec_at). Optionally compacts the staying
 + * entries of src in place. Returns the number of home charge groups.
 + */
 +static int compact_and_copy_vec_cg(int ncg,int *move,
 +                                   int *cgindex,
 +                                   int nvec,rvec *src,gmx_domdec_comm_t *comm,
 +                                   gmx_bool bCompact)
 +{
 +    int m,icg,i0,i1,nrcg;
 +    int home_pos;
 +    int pos_vec[DIM*2];
 +    
 +    home_pos = 0;
 +    
 +    for(m=0; m<DIM*2; m++)
 +    {
 +        pos_vec[m] = 0;
 +    }
 +    
 +    i0 = 0;
 +    for(icg=0; icg<ncg; icg++)
 +    {
 +        i1 = cgindex[icg+1];
 +        m = move[icg];
 +        if (m == -1)
 +        {
 +            if (bCompact)
 +            {
 +                /* Compact the home array in place */
 +                copy_rvec(src[icg],src[home_pos++]);
 +            }
 +        }
 +        else
 +        {
 +            nrcg = i1 - i0;
 +            /* Copy to the communication buffer */
 +            copy_rvec(src[icg],comm->cgcm_state[m][pos_vec[m]]);
 +            /* Advance past this cg's entry: 1 cg_cm + nvec atom blocks */
 +            pos_vec[m] += 1 + nrcg*nvec;
 +        }
 +        i0 = i1;
 +    }
 +    if (!bCompact)
 +    {
 +        home_pos = ncg;
 +    }
 +    
 +    return home_pos;
 +}
 +
 +/* Compact the home index arrays in place, keeping only charge groups
 + * with move[cg] == -1. Moved atoms are removed from the global-to-local
 + * lookup (ga2la) and their cgs are unmarked in bLocalCG (when present).
 + * Returns the new number of home charge groups.
 + */
 +static int compact_ind(int ncg,int *move,
 +                       int *index_gl,int *cgindex,
 +                       int *gatindex,
 +                       gmx_ga2la_t ga2la,char *bLocalCG,
 +                       int *cginfo)
 +{
 +    int cg,nat,a0,a1,a,a_gl;
 +    int home_pos;
 +
 +    home_pos = 0;
 +    nat = 0;
 +    for(cg=0; cg<ncg; cg++)
 +    {
 +        a0 = cgindex[cg];
 +        a1 = cgindex[cg+1];
 +        if (move[cg] == -1)
 +        {
 +            /* Compact the home arrays in place.
 +             * Anything that can be done here avoids access to global arrays.
 +             */
 +            cgindex[home_pos] = nat;
 +            for(a=a0; a<a1; a++)
 +            {
 +                a_gl = gatindex[a];
 +                gatindex[nat] = a_gl;
 +                /* The cell number stays 0, so we don't need to set it */
 +                ga2la_change_la(ga2la,a_gl,nat);
 +                nat++;
 +            }
 +            index_gl[home_pos] = index_gl[cg];
 +            cginfo[home_pos]   = cginfo[cg];
 +            /* The charge group remains local, so bLocalCG does not change */
 +            home_pos++;
 +        }
 +        else
 +        {
 +            /* Clear the global indices */
 +            for(a=a0; a<a1; a++)
 +            {
 +                ga2la_del(ga2la,gatindex[a]);
 +            }
 +            if (bLocalCG)
 +            {
 +                bLocalCG[index_gl[cg]] = FALSE;
 +            }
 +        }
 +    }
 +    /* Close the compacted cg index */
 +    cgindex[home_pos] = nat;
 +    
 +    return home_pos;
 +}
 +
 +/* For every charge group that moves (move[cg] >= 0): remove its atoms
 + * from the global-to-local lookup, unmark it in bLocalCG (when present)
 + * and flag it as moved in cell_index. Unlike compact_ind, nothing is
 + * compacted; the home arrays keep their layout.
 + */
 +static void clear_and_mark_ind(int ncg,int *move,
 +                               int *index_gl,int *cgindex,int *gatindex,
 +                               gmx_ga2la_t ga2la,char *bLocalCG,
 +                               int *cell_index)
 +{
 +    int cg,a0,a1,a;
 +    
 +    for(cg=0; cg<ncg; cg++)
 +    {
 +        if (move[cg] >= 0)
 +        {
 +            a0 = cgindex[cg];
 +            a1 = cgindex[cg+1];
 +            /* Clear the global indices */
 +            for(a=a0; a<a1; a++)
 +            {
 +                ga2la_del(ga2la,gatindex[a]);
 +            }
 +            if (bLocalCG)
 +            {
 +                bLocalCG[index_gl[cg]] = FALSE;
 +            }
 +            /* Signal that this cg has moved using the ns cell index.
 +             * Here we set it to -1. fill_grid will change it
 +             * from -1 to NSGRID_SIGNAL_MOVED_FAC*grid->ncells.
 +             */
 +            cell_index[cg] = -1;
 +        }
 +    }
 +}
 +
 +static void print_cg_move(FILE *fplog,
 +                          gmx_domdec_t *dd,
 +                          gmx_large_int_t step,int cg,int dim,int dir,
 +                          gmx_bool bHaveLimitdAndCMOld,real limitd,
 +                          rvec cm_old,rvec cm_new,real pos_d)
 +{
 +    gmx_domdec_comm_t *comm;
 +    char buf[22];
 +
 +    comm = dd->comm;
 +
 +    fprintf(fplog,"\nStep %s:\n",gmx_step_str(step,buf));
 +    if (bHaveLimitdAndCMOld)
 +    {
 +        fprintf(fplog,"The charge group starting at atom %d moved more than the distance allowed by the domain decomposition (%f) in direction %c\n",
 +                ddglatnr(dd,dd->cgindex[cg]),limitd,dim2char(dim));
 +    }
 +    else
 +    {
 +        fprintf(fplog,"The charge group starting at atom %d moved than the distance allowed by the domain decomposition in direction %c\n",
 +                ddglatnr(dd,dd->cgindex[cg]),dim2char(dim));
 +    }
 +    fprintf(fplog,"distance out of cell %f\n",
 +            dir==1 ? pos_d - comm->cell_x1[dim] : pos_d - comm->cell_x0[dim]);
 +    if (bHaveLimitdAndCMOld)
 +    {
 +        fprintf(fplog,"Old coordinates: %8.3f %8.3f %8.3f\n",
 +                cm_old[XX],cm_old[YY],cm_old[ZZ]);
 +    }
 +    fprintf(fplog,"New coordinates: %8.3f %8.3f %8.3f\n",
 +            cm_new[XX],cm_new[YY],cm_new[ZZ]);
 +    fprintf(fplog,"Old cell boundaries in direction %c: %8.3f %8.3f\n",
 +            dim2char(dim),
 +            comm->old_cell_x0[dim],comm->old_cell_x1[dim]);
 +    fprintf(fplog,"New cell boundaries in direction %c: %8.3f %8.3f\n",
 +            dim2char(dim),
 +            comm->cell_x0[dim],comm->cell_x1[dim]);
 +}
 +
 +/* Report a too-large charge group displacement to the log file (when
 + * available) and to stderr, then terminate with a fatal error.
 + */
 +static void cg_move_error(FILE *fplog,
 +                          gmx_domdec_t *dd,
 +                          gmx_large_int_t step,int cg,int dim,int dir,
 +                          gmx_bool bHaveLimitdAndCMOld,real limitd,
 +                          rvec cm_old,rvec cm_new,real pos_d)
 +{
 +    if (fplog)
 +    {
 +        print_cg_move(fplog, dd,step,cg,dim,dir,
 +                      bHaveLimitdAndCMOld,limitd,cm_old,cm_new,pos_d);
 +    }
 +    print_cg_move(stderr,dd,step,cg,dim,dir,
 +                  bHaveLimitdAndCMOld,limitd,cm_old,cm_new,pos_d);
 +    gmx_fatal(FARGS,
 +              "A charge group moved too far between two domain decomposition steps\n"
 +              "This usually means that your system is not well equilibrated");
 +}
 +
 +/* Apply the screw-pbc transformation to all distributed state entries of
 + * atom a: coordinates are mirrored within the (rectangular) box in y and
 + * z, velocity-like quantities simply have their y/z components negated.
 + */
 +static void rotate_state_atom(t_state *state,int a)
 +{
 +    int est;
 +
 +    for(est=0; est<estNR; est++)
 +    {
 +        if (EST_DISTR(est) && (state->flags & (1<<est))) {
 +            switch (est) {
 +            case estX:
 +                /* Rotate the complete state; for a rectangular box only */
 +                state->x[a][YY] = state->box[YY][YY] - state->x[a][YY];
 +                state->x[a][ZZ] = state->box[ZZ][ZZ] - state->x[a][ZZ];
 +                break;
 +            case estV:
 +                state->v[a][YY] = -state->v[a][YY];
 +                state->v[a][ZZ] = -state->v[a][ZZ];
 +                break;
 +            case estSDX:
 +                state->sd_X[a][YY] = -state->sd_X[a][YY];
 +                state->sd_X[a][ZZ] = -state->sd_X[a][ZZ];
 +                break;
 +            case estCGP:
 +                state->cg_p[a][YY] = -state->cg_p[a][YY];
 +                state->cg_p[a][ZZ] = -state->cg_p[a][ZZ];
 +                break;
 +            case estDISRE_INITF:
 +            case estDISRE_RM3TAV:
 +            case estORIRE_INITF:
 +            case estORIRE_DTAV:
 +                /* These are distances, so not affected by rotation */
 +                break;
 +            default:
 +                gmx_incons("Unknown state entry encountered in rotate_state_atom");            
 +            }
 +        }
 +    }
 +}
 +
 +/* Return comm->moved, reallocated (contents preserved) so it can hold
 + * at least natoms entries.
 + */
 +static int *get_moved(gmx_domdec_comm_t *comm,int natoms)
 +{
 +    if (natoms > comm->moved_nalloc)
 +    {
 +        /* Contents should be preserved here */
 +        comm->moved_nalloc = over_alloc_dd(natoms);
 +        srenew(comm->moved,comm->moved_nalloc);
 +    }
 +
 +    return comm->moved;
 +}
 +
 +/* For the home charge groups in [cg_start,cg_end): compute the center of
 + * geometry, apply pbc to put the cg (and its atoms) back in the unit
 + * cell, and determine into which neighboring cell it should move.
 + * On output move[cg] holds the target communication direction index
 + * combined with the DD_FLAG_FW/BW flags, or -1 when the cg stays home;
 + * cg_cm[cg] is updated with the new center.
 + * Called from an OpenMP-parallel loop, so only state of the assigned
 + * cg range may be modified.
 + */
 +static void calc_cg_move(FILE *fplog,gmx_large_int_t step,
 +                         gmx_domdec_t *dd,
 +                         t_state *state,
 +                         ivec tric_dir,matrix tcm,
 +                         rvec cell_x0,rvec cell_x1,
 +                         rvec limitd,rvec limit0,rvec limit1,
 +                         const int *cgindex,
 +                         int cg_start,int cg_end,
 +                         rvec *cg_cm,
 +                         int *move)
 +{
 +    /* NOTE(review): several locals below (c, i, dim, dim2 aliases etc.)
 +     * are unused here; they look copied from dd_redistribute_cg.
 +     */
 +    int  npbcdim;
 +    int  c,i,cg,k,k0,k1,d,dim,dim2,dir,d2,d3,d4,cell_d;
 +    int  mc,cdd,nrcg,ncg_recv,nat_recv,nvs,nvr,nvec,vec;
 +    int  flag;
 +    gmx_bool bScrew;
 +    ivec dev;
 +    real inv_ncg,pos_d;
 +    rvec cm_new;
 +
 +    npbcdim = dd->npbcdim;
 +
 +    for(cg=cg_start; cg<cg_end; cg++)
 +    {
 +        /* Compute the center of geometry of this charge group */
 +        k0   = cgindex[cg];
 +        k1   = cgindex[cg+1];
 +        nrcg = k1 - k0;
 +        if (nrcg == 1)
 +        {
 +            copy_rvec(state->x[k0],cm_new);
 +        }
 +        else
 +        {
 +            inv_ncg = 1.0/nrcg;
 +            
 +            clear_rvec(cm_new);
 +            for(k=k0; (k<k1); k++)
 +            {
 +                rvec_inc(cm_new,state->x[k]);
 +            }
 +            for(d=0; (d<DIM); d++)
 +            {
 +                cm_new[d] = inv_ncg*cm_new[d];
 +            }
 +        }
 +        
 +        clear_ivec(dev);
 +        /* Do pbc and check DD cell boundary crossings */
 +        for(d=DIM-1; d>=0; d--)
 +        {
 +            if (dd->nc[d] > 1)
 +            {
 +                bScrew = (dd->bScrewPBC && d == XX);
 +                /* Determine the location of this cg in lattice coordinates */
 +                pos_d = cm_new[d];
 +                if (tric_dir[d])
 +                {
 +                    for(d2=d+1; d2<DIM; d2++)
 +                    {
 +                        pos_d += cm_new[d2]*tcm[d2][d];
 +                    }
 +                }
 +                /* Put the charge group in the triclinic unit-cell */
 +                if (pos_d >= cell_x1[d])
 +                {
 +                    if (pos_d >= limit1[d])
 +                    {
 +                        /* Moved further than one cell: fatal */
 +                        cg_move_error(fplog,dd,step,cg,d,1,TRUE,limitd[d],
 +                                      cg_cm[cg],cm_new,pos_d);
 +                    }
 +                    dev[d] = 1;
 +                    if (dd->ci[d] == dd->nc[d] - 1)
 +                    {
 +                        /* Crossing the upper box edge: shift back a box vector */
 +                        rvec_dec(cm_new,state->box[d]);
 +                        if (bScrew)
 +                        {
 +                            cm_new[YY] = state->box[YY][YY] - cm_new[YY];
 +                            cm_new[ZZ] = state->box[ZZ][ZZ] - cm_new[ZZ];
 +                        }
 +                        for(k=k0; (k<k1); k++)
 +                        {
 +                            rvec_dec(state->x[k],state->box[d]);
 +                            if (bScrew)
 +                            {
 +                                rotate_state_atom(state,k);
 +                            }
 +                        }
 +                    }
 +                }
 +                else if (pos_d < cell_x0[d])
 +                {
 +                    if (pos_d < limit0[d])
 +                    {
 +                        cg_move_error(fplog,dd,step,cg,d,-1,TRUE,limitd[d],
 +                                      cg_cm[cg],cm_new,pos_d);
 +                    }
 +                    dev[d] = -1;
 +                    if (dd->ci[d] == 0)
 +                    {
 +                        rvec_inc(cm_new,state->box[d]);
 +                        if (bScrew)
 +                        {
 +                            cm_new[YY] = state->box[YY][YY] - cm_new[YY];
 +                            cm_new[ZZ] = state->box[ZZ][ZZ] - cm_new[ZZ];
 +                        }
 +                        for(k=k0; (k<k1); k++)
 +                        {
 +                            rvec_inc(state->x[k],state->box[d]);
 +                            if (bScrew)
 +                            {
 +                                rotate_state_atom(state,k);
 +                            }
 +                        }
 +                    }
 +                }
 +            }
 +            else if (d < npbcdim)
 +            {
 +                /* Put the charge group in the rectangular unit-cell */
 +                while (cm_new[d] >= state->box[d][d])
 +                {
 +                    rvec_dec(cm_new,state->box[d]);
 +                    for(k=k0; (k<k1); k++)
 +                    {
 +                        rvec_dec(state->x[k],state->box[d]);
 +                    }
 +                }
 +                while (cm_new[d] < 0)
 +                {
 +                    rvec_inc(cm_new,state->box[d]);
 +                    for(k=k0; (k<k1); k++)
 +                    {
 +                        rvec_inc(state->x[k],state->box[d]);
 +                    }
 +                }
 +            }
 +        }
 +    
 +        copy_rvec(cm_new,cg_cm[cg]);
 +        
 +        /* Determine where this cg should go */
 +        flag = 0;
 +        mc = -1;
 +        for(d=0; d<dd->ndim; d++)
 +        {
 +            dim = dd->dim[d];
 +            if (dev[dim] == 1)
 +            {
 +                flag |= DD_FLAG_FW(d);
 +                if (mc == -1)
 +                {
 +                    mc = d*2;
 +                }
 +            }
 +            else if (dev[dim] == -1)
 +            {
 +                flag |= DD_FLAG_BW(d);
 +                if (mc == -1) {
 +                    /* With only 2 cells forward and backward are the same */
 +                    if (dd->nc[dim] > 2)
 +                    {
 +                        mc = d*2 + 1;
 +                    }
 +                    else
 +                    {
 +                        mc = d*2;
 +                    }
 +                }
 +            }
 +        }
 +        /* Temporarily store the flag in move */
 +        move[cg] = mc + flag;
 +    }
 +}
 +
 +static void dd_redistribute_cg(FILE *fplog,gmx_large_int_t step,
 +                               gmx_domdec_t *dd,ivec tric_dir,
 +                               t_state *state,rvec **f,
 +                               t_forcerec *fr,t_mdatoms *md,
 +                               gmx_bool bCompact,
 +                               t_nrnb *nrnb,
 +                               int *ncg_stay_home,
 +                               int *ncg_moved)
 +{
 +    int  *move;
 +    int  npbcdim;
 +    int  ncg[DIM*2],nat[DIM*2];
 +    int  c,i,cg,k,k0,k1,d,dim,dim2,dir,d2,d3,d4,cell_d;
 +    int  mc,cdd,nrcg,ncg_recv,nat_recv,nvs,nvr,nvec,vec;
 +    int  sbuf[2],rbuf[2];
 +    int  home_pos_cg,home_pos_at,buf_pos;
 +    int  flag;
 +    gmx_bool bV=FALSE,bSDX=FALSE,bCGP=FALSE;
 +    gmx_bool bScrew;
 +    ivec dev;
 +    real inv_ncg,pos_d;
 +    matrix tcm;
 +    rvec *cg_cm=NULL,cell_x0,cell_x1,limitd,limit0,limit1,cm_new;
 +    atom_id *cgindex;
 +    cginfo_mb_t *cginfo_mb;
 +    gmx_domdec_comm_t *comm;
 +    int  *moved;
 +    int  nthread,thread;
 +    
 +    if (dd->bScrewPBC)
 +    {
 +        check_screw_box(state->box);
 +    }
 +    
 +    comm  = dd->comm;
 +    if (fr->cutoff_scheme == ecutsGROUP)
 +    {
 +        cg_cm = fr->cg_cm;
 +    }
 +    
 +    for(i=0; i<estNR; i++)
 +    {
 +        if (EST_DISTR(i))
 +        {
 +            switch (i)
 +            {
 +            case estX:   /* Always present */            break;
 +            case estV:   bV   = (state->flags & (1<<i)); break;
 +            case estSDX: bSDX = (state->flags & (1<<i)); break;
 +            case estCGP: bCGP = (state->flags & (1<<i)); break;
 +            case estLD_RNG:
 +            case estLD_RNGI:
 +            case estDISRE_INITF:
 +            case estDISRE_RM3TAV:
 +            case estORIRE_INITF:
 +            case estORIRE_DTAV:
 +                /* No processing required */
 +                break;
 +            default:
 +            gmx_incons("Unknown state entry encountered in dd_redistribute_cg");
 +            }
 +        }
 +    }
 +    
 +    if (dd->ncg_tot > comm->nalloc_int)
 +    {
 +        comm->nalloc_int = over_alloc_dd(dd->ncg_tot);
 +        srenew(comm->buf_int,comm->nalloc_int);
 +    }
 +    move = comm->buf_int;
 +    
 +    /* Clear the count */
 +    for(c=0; c<dd->ndim*2; c++)
 +    {
 +        ncg[c] = 0;
 +        nat[c] = 0;
 +    }
 +
 +    npbcdim = dd->npbcdim;
 +
 +    for(d=0; (d<DIM); d++)
 +    {
 +        limitd[d] = dd->comm->cellsize_min[d];
 +        if (d >= npbcdim && dd->ci[d] == 0)
 +        {
 +            cell_x0[d] = -GMX_FLOAT_MAX;
 +        }
 +        else
 +        {
 +            cell_x0[d] = comm->cell_x0[d];
 +        }
 +        if (d >= npbcdim && dd->ci[d] == dd->nc[d] - 1)
 +        {
 +            cell_x1[d] = GMX_FLOAT_MAX;
 +        }
 +        else
 +        {
 +            cell_x1[d] = comm->cell_x1[d];
 +        }
 +        if (d < npbcdim)
 +        {
 +            limit0[d] = comm->old_cell_x0[d] - limitd[d];
 +            limit1[d] = comm->old_cell_x1[d] + limitd[d];
 +        }
 +        else
 +        {
 +            /* We check after communication if a charge group moved
 +             * more than one cell. Set the pre-comm check limit to float_max.
 +             */
 +            limit0[d] = -GMX_FLOAT_MAX;
 +            limit1[d] =  GMX_FLOAT_MAX;
 +        }
 +    }
 +    
 +    make_tric_corr_matrix(npbcdim,state->box,tcm);
 +    
 +    cgindex = dd->cgindex;
 +
 +    nthread = gmx_omp_nthreads_get(emntDomdec);
 +
 +    /* Compute the center of geometry for all home charge groups
 +     * and put them in the box and determine where they should go.
 +     */
 +#pragma omp parallel for num_threads(nthread) schedule(static)
 +    for(thread=0; thread<nthread; thread++)
 +    {
 +        calc_cg_move(fplog,step,dd,state,tric_dir,tcm,
 +                     cell_x0,cell_x1,limitd,limit0,limit1,
 +                     cgindex,
 +                     ( thread   *dd->ncg_home)/nthread,
 +                     ((thread+1)*dd->ncg_home)/nthread,
 +                     fr->cutoff_scheme==ecutsGROUP ? cg_cm : state->x,
 +                     move);
 +    }
 +
 +    for(cg=0; cg<dd->ncg_home; cg++)
 +    {
 +        if (move[cg] >= 0)
 +        {
 +            mc = move[cg];
 +            flag     = mc & ~DD_FLAG_NRCG;
 +            mc       = mc & DD_FLAG_NRCG;
 +            move[cg] = mc;
 +
 +            if (ncg[mc]+1 > comm->cggl_flag_nalloc[mc])
 +            {
 +                comm->cggl_flag_nalloc[mc] = over_alloc_dd(ncg[mc]+1);
 +                srenew(comm->cggl_flag[mc],comm->cggl_flag_nalloc[mc]*DD_CGIBS);
 +            }
 +            comm->cggl_flag[mc][ncg[mc]*DD_CGIBS  ] = dd->index_gl[cg];
 +            /* We store the cg size in the lower 16 bits
 +             * and the place where the charge group should go
 +             * in the next 6 bits. This saves some communication volume.
 +             */
 +            nrcg = cgindex[cg+1] - cgindex[cg];
 +            comm->cggl_flag[mc][ncg[mc]*DD_CGIBS+1] = nrcg | flag;
 +            ncg[mc] += 1;
 +            nat[mc] += nrcg;
 +        }
 +    }
 +    
 +    inc_nrnb(nrnb,eNR_CGCM,dd->nat_home);
 +    inc_nrnb(nrnb,eNR_RESETX,dd->ncg_home);
 +
 +    *ncg_moved = 0;
 +    for(i=0; i<dd->ndim*2; i++)
 +    {
 +        *ncg_moved += ncg[i];
 +    }
 +    
 +    nvec = 1;
 +    if (bV)
 +    {
 +        nvec++;
 +    }
 +    if (bSDX)
 +    {
 +        nvec++;
 +    }
 +    if (bCGP)
 +    {
 +        nvec++;
 +    }
 +    
 +    /* Make sure the communication buffers are large enough */
 +    for(mc=0; mc<dd->ndim*2; mc++)
 +    {
 +        nvr = ncg[mc] + nat[mc]*nvec;
 +        if (nvr > comm->cgcm_state_nalloc[mc])
 +        {
 +            comm->cgcm_state_nalloc[mc] = over_alloc_dd(nvr);
 +            srenew(comm->cgcm_state[mc],comm->cgcm_state_nalloc[mc]);
 +        }
 +    }
 +    
 +    switch (fr->cutoff_scheme)
 +    {
 +    case ecutsGROUP:
 +        /* Recalculating cg_cm might be cheaper than communicating,
 +         * but that could give rise to rounding issues.
 +         */
 +        home_pos_cg =
 +            compact_and_copy_vec_cg(dd->ncg_home,move,cgindex,
 +                                    nvec,cg_cm,comm,bCompact);
 +    break;
 +    case ecutsVERLET:
 +        /* Without charge groups we send the moved atom coordinates
 +         * over twice. This is so the code below can be used without
 +         * many conditionals for both for with and without charge groups.
 +         */
 +        home_pos_cg =
 +            compact_and_copy_vec_cg(dd->ncg_home,move,cgindex,
 +                                    nvec,state->x,comm,FALSE);
 +        if (bCompact)
 +        {
 +            home_pos_cg -= *ncg_moved;
 +        }
 +        break;
 +    default:
 +        gmx_incons("unimplemented");
 +        home_pos_cg = 0;
 +    }
 +    
 +    vec = 0;
 +    home_pos_at =
 +        compact_and_copy_vec_at(dd->ncg_home,move,cgindex,
 +                                nvec,vec++,state->x,comm,bCompact);
 +    if (bV)
 +    {
 +        compact_and_copy_vec_at(dd->ncg_home,move,cgindex,
 +                                nvec,vec++,state->v,comm,bCompact);
 +    }
 +    if (bSDX)
 +    {
 +        compact_and_copy_vec_at(dd->ncg_home,move,cgindex,
 +                                nvec,vec++,state->sd_X,comm,bCompact);
 +    }
 +    if (bCGP)
 +    {
 +        compact_and_copy_vec_at(dd->ncg_home,move,cgindex,
 +                                nvec,vec++,state->cg_p,comm,bCompact);
 +    }
 +    
 +    if (bCompact)
 +    {
 +        compact_ind(dd->ncg_home,move,
 +                    dd->index_gl,dd->cgindex,dd->gatindex,
 +                    dd->ga2la,comm->bLocalCG,
 +                    fr->cginfo);
 +    }
 +    else
 +    {
 +        if (fr->cutoff_scheme == ecutsVERLET)
 +        {
 +            moved = get_moved(comm,dd->ncg_home);
 +
 +            for(k=0; k<dd->ncg_home; k++)
 +            {
 +                moved[k] = 0;
 +            }
 +        }
 +        else
 +        {
 +            moved = fr->ns.grid->cell_index;
 +        }
 +
 +        clear_and_mark_ind(dd->ncg_home,move,
 +                           dd->index_gl,dd->cgindex,dd->gatindex,
 +                           dd->ga2la,comm->bLocalCG,
 +                           moved);
 +    }
 +    
 +    cginfo_mb = fr->cginfo_mb;
 +
 +    *ncg_stay_home = home_pos_cg;
 +    for(d=0; d<dd->ndim; d++)
 +    {
 +        dim = dd->dim[d];
 +        ncg_recv = 0;
 +        nat_recv = 0;
 +        nvr      = 0;
 +        for(dir=0; dir<(dd->nc[dim]==2 ? 1 : 2); dir++)
 +        {
 +            cdd = d*2 + dir;
 +            /* Communicate the cg and atom counts */
 +            sbuf[0] = ncg[cdd];
 +            sbuf[1] = nat[cdd];
 +            if (debug)
 +            {
 +                fprintf(debug,"Sending ddim %d dir %d: ncg %d nat %d\n",
 +                        d,dir,sbuf[0],sbuf[1]);
 +            }
 +            dd_sendrecv_int(dd, d, dir, sbuf, 2, rbuf, 2);
 +            
 +            if ((ncg_recv+rbuf[0])*DD_CGIBS > comm->nalloc_int)
 +            {
 +                comm->nalloc_int = over_alloc_dd((ncg_recv+rbuf[0])*DD_CGIBS);
 +                srenew(comm->buf_int,comm->nalloc_int);
 +            }
 +            
 +            /* Communicate the charge group indices, sizes and flags */
 +            dd_sendrecv_int(dd, d, dir,
 +                            comm->cggl_flag[cdd], sbuf[0]*DD_CGIBS,
 +                            comm->buf_int+ncg_recv*DD_CGIBS, rbuf[0]*DD_CGIBS);
 +            
 +            nvs = ncg[cdd] + nat[cdd]*nvec;
 +            i   = rbuf[0]  + rbuf[1] *nvec;
 +            vec_rvec_check_alloc(&comm->vbuf,nvr+i);
 +            
 +            /* Communicate cgcm and state */
 +            dd_sendrecv_rvec(dd, d, dir,
 +                             comm->cgcm_state[cdd], nvs,
 +                             comm->vbuf.v+nvr, i);
 +            ncg_recv += rbuf[0];
 +            nat_recv += rbuf[1];
 +            nvr      += i;
 +        }
 +        
 +        /* Process the received charge groups */
 +        buf_pos = 0;
 +        for(cg=0; cg<ncg_recv; cg++)
 +        {
 +            flag = comm->buf_int[cg*DD_CGIBS+1];
 +
 +            if (dim >= npbcdim && dd->nc[dim] > 2)
 +            {
 +                /* No pbc in this dim and more than one domain boundary.
 +                 * We do a separate check if a charge group didn't move too far.
 +                 */
 +                if (((flag & DD_FLAG_FW(d)) &&
 +                     comm->vbuf.v[buf_pos][dim] > cell_x1[dim]) ||
 +                    ((flag & DD_FLAG_BW(d)) &&
 +                     comm->vbuf.v[buf_pos][dim] < cell_x0[dim]))
 +                {
 +                    cg_move_error(fplog,dd,step,cg,dim,
 +                                  (flag & DD_FLAG_FW(d)) ? 1 : 0,
 +                                   FALSE,0,
 +                                   comm->vbuf.v[buf_pos],
 +                                   comm->vbuf.v[buf_pos],
 +                                   comm->vbuf.v[buf_pos][dim]);
 +                }
 +            }
 +
 +            mc = -1;
 +            if (d < dd->ndim-1)
 +            {
 +                /* Check which direction this cg should go */
 +                for(d2=d+1; (d2<dd->ndim && mc==-1); d2++)
 +                {
 +                    if (dd->bGridJump)
 +                    {
 +                        /* The cell boundaries for dimension d2 are not equal
 +                         * for each cell row of the lower dimension(s),
 +                         * therefore we might need to redetermine where
 +                         * this cg should go.
 +                         */
 +                        dim2 = dd->dim[d2];
 +                        /* If this cg crosses the box boundary in dimension d2
 +                         * we can use the communicated flag, so we do not
 +                         * have to worry about pbc.
 +                         */
 +                        if (!((dd->ci[dim2] == dd->nc[dim2]-1 &&
 +                               (flag & DD_FLAG_FW(d2))) ||
 +                              (dd->ci[dim2] == 0 &&
 +                               (flag & DD_FLAG_BW(d2)))))
 +                        {
 +                            /* Clear the two flags for this dimension */
 +                            flag &= ~(DD_FLAG_FW(d2) | DD_FLAG_BW(d2));
 +                            /* Determine the location of this cg
 +                             * in lattice coordinates
 +                             */
 +                            pos_d = comm->vbuf.v[buf_pos][dim2];
 +                            if (tric_dir[dim2])
 +                            {
 +                                for(d3=dim2+1; d3<DIM; d3++)
 +                                {
 +                                    pos_d +=
 +                                        comm->vbuf.v[buf_pos][d3]*tcm[d3][dim2];
 +                                }
 +                            }
 +                            /* Check of we are not at the box edge.
 +                             * pbc is only handled in the first step above,
 +                             * but this check could move over pbc while
 +                             * the first step did not due to different rounding.
 +                             */
 +                            if (pos_d >= cell_x1[dim2] &&
 +                                dd->ci[dim2] != dd->nc[dim2]-1)
 +                            {
 +                                flag |= DD_FLAG_FW(d2);
 +                            }
 +                            else if (pos_d < cell_x0[dim2] &&
 +                                     dd->ci[dim2] != 0)
 +                            {
 +                                flag |= DD_FLAG_BW(d2);
 +                            }
 +                            comm->buf_int[cg*DD_CGIBS+1] = flag;
 +                        }
 +                    }
 +                    /* Set to which neighboring cell this cg should go */
 +                    if (flag & DD_FLAG_FW(d2))
 +                    {
 +                        mc = d2*2;
 +                    }
 +                    else if (flag & DD_FLAG_BW(d2))
 +                    {
 +                        if (dd->nc[dd->dim[d2]] > 2)
 +                        {
 +                            mc = d2*2+1;
 +                        }
 +                        else
 +                        {
 +                            mc = d2*2;
 +                        }
 +                    }
 +                }
 +            }
 +            
 +            nrcg = flag & DD_FLAG_NRCG;
 +            if (mc == -1)
 +            {
 +                if (home_pos_cg+1 > dd->cg_nalloc)
 +                {
 +                    dd->cg_nalloc = over_alloc_dd(home_pos_cg+1);
 +                    srenew(dd->index_gl,dd->cg_nalloc);
 +                    srenew(dd->cgindex,dd->cg_nalloc+1);
 +                }
 +                /* Set the global charge group index and size */
 +                dd->index_gl[home_pos_cg] = comm->buf_int[cg*DD_CGIBS];
 +                dd->cgindex[home_pos_cg+1] = dd->cgindex[home_pos_cg] + nrcg;
 +                /* Copy the state from the buffer */
 +                dd_check_alloc_ncg(fr,state,f,home_pos_cg+1);
 +                if (fr->cutoff_scheme == ecutsGROUP)
 +                {
 +                    cg_cm = fr->cg_cm;
 +                    copy_rvec(comm->vbuf.v[buf_pos],cg_cm[home_pos_cg]);
 +                }
 +                buf_pos++;
 +
 +                /* Set the cginfo */
 +                fr->cginfo[home_pos_cg] = ddcginfo(cginfo_mb,
 +                                                   dd->index_gl[home_pos_cg]);
 +                if (comm->bLocalCG)
 +                {
 +                    comm->bLocalCG[dd->index_gl[home_pos_cg]] = TRUE;
 +                }
 +
 +                if (home_pos_at+nrcg > state->nalloc)
 +                {
 +                    dd_realloc_state(state,f,home_pos_at+nrcg);
 +                }
 +                for(i=0; i<nrcg; i++)
 +                {
 +                    copy_rvec(comm->vbuf.v[buf_pos++],
 +                              state->x[home_pos_at+i]);
 +                }
 +                if (bV)
 +                {
 +                    for(i=0; i<nrcg; i++)
 +                    {
 +                        copy_rvec(comm->vbuf.v[buf_pos++],
 +                                  state->v[home_pos_at+i]);
 +                    }
 +                }
 +                if (bSDX)
 +                {
 +                    for(i=0; i<nrcg; i++)
 +                    {
 +                        copy_rvec(comm->vbuf.v[buf_pos++],
 +                                  state->sd_X[home_pos_at+i]);
 +                    }
 +                }
 +                if (bCGP)
 +                {
 +                    for(i=0; i<nrcg; i++)
 +                    {
 +                        copy_rvec(comm->vbuf.v[buf_pos++],
 +                                  state->cg_p[home_pos_at+i]);
 +                    }
 +                }
 +                home_pos_cg += 1;
 +                home_pos_at += nrcg;
 +            }
 +            else
 +            {
 +                /* Reallocate the buffers if necessary  */
 +                if (ncg[mc]+1 > comm->cggl_flag_nalloc[mc])
 +                {
 +                    comm->cggl_flag_nalloc[mc] = over_alloc_dd(ncg[mc]+1);
 +                    srenew(comm->cggl_flag[mc],comm->cggl_flag_nalloc[mc]*DD_CGIBS);
 +                }
 +                nvr = ncg[mc] + nat[mc]*nvec;
 +                if (nvr + 1 + nrcg*nvec > comm->cgcm_state_nalloc[mc])
 +                {
 +                    comm->cgcm_state_nalloc[mc] = over_alloc_dd(nvr + 1 + nrcg*nvec);
 +                    srenew(comm->cgcm_state[mc],comm->cgcm_state_nalloc[mc]);
 +                }
 +                /* Copy from the receive to the send buffers */
 +                memcpy(comm->cggl_flag[mc] + ncg[mc]*DD_CGIBS,
 +                       comm->buf_int + cg*DD_CGIBS,
 +                       DD_CGIBS*sizeof(int));
 +                memcpy(comm->cgcm_state[mc][nvr],
 +                       comm->vbuf.v[buf_pos],
 +                       (1+nrcg*nvec)*sizeof(rvec));
 +                buf_pos += 1 + nrcg*nvec;
 +                ncg[mc] += 1;
 +                nat[mc] += nrcg;
 +            }
 +        }
 +    }
 +    
 +    /* With sorting (!bCompact) the indices are now only partially up to date
 +     * and ncg_home and nat_home are not the real count, since there are
 +     * "holes" in the arrays for the charge groups that moved to neighbors.
 +     */
 +    if (fr->cutoff_scheme == ecutsVERLET)
 +    {
 +        moved = get_moved(comm,home_pos_cg);
 +
 +        for(i=dd->ncg_home; i<home_pos_cg; i++)
 +        {
 +            moved[i] = 0;
 +        }
 +    }
 +    dd->ncg_home = home_pos_cg;
 +    dd->nat_home = home_pos_at;
 +
 +    if (debug)
 +    {
 +        fprintf(debug,
 +                "Finished repartitioning: cgs moved out %d, new home %d\n",
 +                *ncg_moved,dd->ncg_home-*ncg_moved);
 +                
 +    }
 +}
 +
 +void dd_cycles_add(gmx_domdec_t *dd,float cycles,int ddCycl)
 +{
 +    dd->comm->cycl[ddCycl] += cycles;
 +    dd->comm->cycl_n[ddCycl]++;
 +    if (cycles > dd->comm->cycl_max[ddCycl])
 +    {
 +        dd->comm->cycl_max[ddCycl] = cycles;
 +    }
 +}
 +
 +static double force_flop_count(t_nrnb *nrnb)
 +{
 +    int i;
 +    double sum;
 +    const char *name;
 +
 +    sum = 0;
 +    for(i=0; i<eNR_NBKERNEL_FREE_ENERGY; i++)
 +    {
 +        /* To get closer to the real timings, we half the count
 +         * for the normal loops and again half it for water loops.
 +         */
 +        name = nrnb_str(i);
 +        if (strstr(name,"W3") != NULL || strstr(name,"W4") != NULL)
 +        {
 +            sum += nrnb->n[i]*0.25*cost_nrnb(i);
 +        }
 +        else
 +        {
 +            sum += nrnb->n[i]*0.50*cost_nrnb(i);
 +        }
 +    }
 +    for(i=eNR_NBKERNEL_FREE_ENERGY; i<=eNR_NB14; i++)
 +    {
 +        name = nrnb_str(i);
 +        if (strstr(name,"W3") != NULL || strstr(name,"W4") != NULL)
 +        sum += nrnb->n[i]*cost_nrnb(i);
 +    }
 +    for(i=eNR_BONDS; i<=eNR_WALLS; i++)
 +    {
 +        sum += nrnb->n[i]*cost_nrnb(i);
 +    }
 +
 +    return sum;
 +}
 +
 +void dd_force_flop_start(gmx_domdec_t *dd,t_nrnb *nrnb)
 +{
 +    if (dd->comm->eFlop)
 +    {
 +        dd->comm->flop -= force_flop_count(nrnb);
 +    }
 +}
 +void dd_force_flop_stop(gmx_domdec_t *dd,t_nrnb *nrnb)
 +{
 +    if (dd->comm->eFlop)
 +    {
 +        dd->comm->flop += force_flop_count(nrnb);
 +        dd->comm->flop_n++;
 +    }
 +}  
 +
 +static void clear_dd_cycle_counts(gmx_domdec_t *dd)
 +{
 +    int i;
 +    
 +    for(i=0; i<ddCyclNr; i++)
 +    {
 +        dd->comm->cycl[i] = 0;
 +        dd->comm->cycl_n[i] = 0;
 +        dd->comm->cycl_max[i] = 0;
 +    }
 +    dd->comm->flop = 0;
 +    dd->comm->flop_n = 0;
 +}
 +
/* Collect the load measurements of all DD ranks.
 *
 * Working from the last DD dimension back to the first, every rank that
 * participates in the communication of dimension d packs its load data
 * into sbuf and gathers it on the root of its cell row; the row root
 * reduces the gathered values into comm->load[d].  After the loop the
 * DD master accumulates the global counters used by the load reports.
 * The sbuf layout must match the unpacking order in the root branch.
 */
static void get_load_distribution(gmx_domdec_t *dd,gmx_wallcycle_t wcycle)
{
    gmx_domdec_comm_t *comm;
    gmx_domdec_load_t *load;
    gmx_domdec_root_t *root=NULL;
    int  d,dim,cid,i,pos;
    float cell_frac=0,sbuf[DD_NLOAD_MAX];
    gmx_bool bSepPME;
    
    if (debug)
    {
        fprintf(debug,"get_load_distribution start\n");
    }

    wallcycle_start(wcycle,ewcDDCOMMLOAD);
    
    comm = dd->comm;
    
    /* True when separate PME ranks are in use */
    bSepPME = (dd->pme_nodeid >= 0);
    
    for(d=dd->ndim-1; d>=0; d--)
    {
        dim = dd->dim[d];
        /* Check if we participate in the communication in this dimension */
        if (d == dd->ndim-1 || 
            (dd->ci[dd->dim[d+1]]==0 && dd->ci[dd->dim[dd->ndim-1]]==0))
        {
            load = &comm->load[d];
            if (dd->bGridJump)
            {
                /* Relative extent of our cell along this dimension (DLB) */
                cell_frac = comm->cell_f1[d] - comm->cell_f0[d];
            }
            pos = 0;
            if (d == dd->ndim-1)
            {
                /* Innermost dimension: seed the buffer with our own load */
                sbuf[pos++] = dd_force_load(comm);
                sbuf[pos++] = sbuf[0];
                if (dd->bGridJump)
                {
                    sbuf[pos++] = sbuf[0];
                    sbuf[pos++] = cell_frac;
                    if (d > 0)
                    {
                        sbuf[pos++] = comm->cell_f_max0[d];
                        sbuf[pos++] = comm->cell_f_min1[d];
                    }
                }
                if (bSepPME)
                {
                    sbuf[pos++] = comm->cycl[ddCyclPPduringPME];
                    sbuf[pos++] = comm->cycl[ddCyclPME];
                }
            }
            else
            {
                /* Outer dimension: forward the reduction of dimension d+1 */
                sbuf[pos++] = comm->load[d+1].sum;
                sbuf[pos++] = comm->load[d+1].max;
                if (dd->bGridJump)
                {
                    sbuf[pos++] = comm->load[d+1].sum_m;
                    sbuf[pos++] = comm->load[d+1].cvol_min*cell_frac;
                    sbuf[pos++] = comm->load[d+1].flags;
                    if (d > 0)
                    {
                        sbuf[pos++] = comm->cell_f_max0[d];
                        sbuf[pos++] = comm->cell_f_min1[d];
                    }
                }
                if (bSepPME)
                {
                    sbuf[pos++] = comm->load[d+1].mdf;
                    sbuf[pos++] = comm->load[d+1].pme;
                }
            }
            load->nload = pos;
            /* Communicate a row in DD direction d.
             * The communicators are setup such that the root always has rank 0.
             */
#ifdef GMX_MPI
            MPI_Gather(sbuf      ,load->nload*sizeof(float),MPI_BYTE,
                       load->load,load->nload*sizeof(float),MPI_BYTE,
                       0,comm->mpi_comm_load[d]);
#endif
            if (dd->ci[dim] == dd->master_ci[dim])
            {
                /* We are the root, process this row */
                if (comm->bDynLoadBal)
                {
                    root = comm->root[d];
                }
                load->sum = 0;
                load->max = 0;
                load->sum_m = 0;
                load->cvol_min = 1;
                load->flags = 0;
                load->mdf = 0;
                load->pme = 0;
                pos = 0;
                /* Unpack one record per cell in this row; the field order
                 * must mirror the packing above.
                 */
                for(i=0; i<dd->nc[dim]; i++)
                {
                    load->sum += load->load[pos++];
                    load->max = max(load->max,load->load[pos]);
                    pos++;
                    if (dd->bGridJump)
                    {
                        if (root->bLimited)
                        {
                            /* This direction could not be load balanced properly,
                             * therefore we need to use the maximum iso the average load.
                             */
                            load->sum_m = max(load->sum_m,load->load[pos]);
                        }
                        else
                        {
                            load->sum_m += load->load[pos];
                        }
                        pos++;
                        load->cvol_min = min(load->cvol_min,load->load[pos]);
                        pos++;
                        if (d < dd->ndim-1)
                        {
                            load->flags = (int)(load->load[pos++] + 0.5);
                        }
                        if (d > 0)
                        {
                            root->cell_f_max0[i] = load->load[pos++];
                            root->cell_f_min1[i] = load->load[pos++];
                        }
                    }
                    if (bSepPME)
                    {
                        load->mdf = max(load->mdf,load->load[pos]);
                        pos++;
                        load->pme = max(load->pme,load->load[pos]);
                        pos++;
                    }
                }
                if (comm->bDynLoadBal && root->bLimited)
                {
                    /* Balancing hit the cell size limit in this dimension:
                     * scale sum_m to a row total and record the limitation.
                     */
                    load->sum_m *= dd->nc[dim];
                    load->flags |= (1<<d);
                }
            }
        }
    }

    /* The master accumulates the run totals used for the final report */
    if (DDMASTER(dd))
    {
        comm->nload      += dd_load_count(comm);
        comm->load_step  += comm->cycl[ddCyclStep];
        comm->load_sum   += comm->load[0].sum;
        comm->load_max   += comm->load[0].max;
        if (comm->bDynLoadBal)
        {
            for(d=0; d<dd->ndim; d++)
            {
                if (comm->load[0].flags & (1<<d))
                {
                    comm->load_lim[d]++;
                }
            }
        }
        if (bSepPME)
        {
            comm->load_mdf += comm->load[0].mdf;
            comm->load_pme += comm->load[0].pme;
        }
    }

    wallcycle_stop(wcycle,ewcDDCOMMLOAD);
    
    if (debug)
    {
        fprintf(debug,"get_load_distribution finished\n");
    }
}
 +
 +static float dd_force_imb_perf_loss(gmx_domdec_t *dd)
 +{
 +    /* Return the relative performance loss on the total run time
 +     * due to the force calculation load imbalance.
 +     */
 +    if (dd->comm->nload > 0)
 +    {
 +        return
 +            (dd->comm->load_max*dd->nnodes - dd->comm->load_sum)/
 +            (dd->comm->load_step*dd->nnodes);
 +    }
 +    else
 +    {
 +        return 0;
 +    }
 +}
 +
 +static void print_dd_load_av(FILE *fplog,gmx_domdec_t *dd)
 +{
 +    char  buf[STRLEN];
 +    int   npp,npme,nnodes,d,limp;
 +    float imbal,pme_f_ratio,lossf,lossp=0;
 +    gmx_bool  bLim;
 +    gmx_domdec_comm_t *comm;
 +
 +    comm = dd->comm;
 +    if (DDMASTER(dd) && comm->nload > 0)
 +    {
 +        npp    = dd->nnodes;
 +        npme   = (dd->pme_nodeid >= 0) ? comm->npmenodes : 0;
 +        nnodes = npp + npme;
 +        imbal = comm->load_max*npp/comm->load_sum - 1;
 +        lossf = dd_force_imb_perf_loss(dd);
 +        sprintf(buf," Average load imbalance: %.1f %%\n",imbal*100);
 +        fprintf(fplog,"%s",buf);
 +        fprintf(stderr,"\n");
 +        fprintf(stderr,"%s",buf);
 +        sprintf(buf," Part of the total run time spent waiting due to load imbalance: %.1f %%\n",lossf*100);
 +        fprintf(fplog,"%s",buf);
 +        fprintf(stderr,"%s",buf);
 +        bLim = FALSE;
 +        if (comm->bDynLoadBal)
 +        {
 +            sprintf(buf," Steps where the load balancing was limited by -rdd, -rcon and/or -dds:");
 +            for(d=0; d<dd->ndim; d++)
 +            {
 +                limp = (200*comm->load_lim[d]+1)/(2*comm->nload);
 +                sprintf(buf+strlen(buf)," %c %d %%",dim2char(dd->dim[d]),limp);
 +                if (limp >= 50)
 +                {
 +                    bLim = TRUE;
 +                }
 +            }
 +            sprintf(buf+strlen(buf),"\n");
 +            fprintf(fplog,"%s",buf);
 +            fprintf(stderr,"%s",buf);
 +        }
 +        if (npme > 0)
 +        {
 +            pme_f_ratio = comm->load_pme/comm->load_mdf;
 +            lossp = (comm->load_pme -comm->load_mdf)/comm->load_step;
 +            if (lossp <= 0)
 +            {
 +                lossp *= (float)npme/(float)nnodes;
 +            }
 +            else
 +            {
 +                lossp *= (float)npp/(float)nnodes;
 +            }
 +            sprintf(buf," Average PME mesh/force load: %5.3f\n",pme_f_ratio);
 +            fprintf(fplog,"%s",buf);
 +            fprintf(stderr,"%s",buf);
 +            sprintf(buf," Part of the total run time spent waiting due to PP/PME imbalance: %.1f %%\n",fabs(lossp)*100);
 +            fprintf(fplog,"%s",buf);
 +            fprintf(stderr,"%s",buf);
 +        }
 +        fprintf(fplog,"\n");
 +        fprintf(stderr,"\n");
 +        
 +        if (lossf >= DD_PERF_LOSS)
 +        {
 +            sprintf(buf,
-                               ncg_moved,comm->moved,
++                    "NOTE: %.1f %% of the available CPU time was lost due to load imbalance\n"
 +                    "      in the domain decomposition.\n",lossf*100);
 +            if (!comm->bDynLoadBal)
 +            {
 +                sprintf(buf+strlen(buf),"      You might want to use dynamic load balancing (option -dlb.)\n");
 +            }
 +            else if (bLim)
 +            {
 +                sprintf(buf+strlen(buf),"      You might want to decrease the cell size limit (options -rdd, -rcon and/or -dds).\n");
 +            }
 +            fprintf(fplog,"%s\n",buf);
 +            fprintf(stderr,"%s\n",buf);
 +        }
 +        if (npme > 0 && fabs(lossp) >= DD_PERF_LOSS)
 +        {
 +            sprintf(buf,
 +                    "NOTE: %.1f %% performance was lost because the PME nodes\n"
 +                    "      had %s work to do than the PP nodes.\n"
 +                    "      You might want to %s the number of PME nodes\n"
 +                    "      or %s the cut-off and the grid spacing.\n",
 +                    fabs(lossp*100),
 +                    (lossp < 0) ? "less"     : "more",
 +                    (lossp < 0) ? "decrease" : "increase",
 +                    (lossp < 0) ? "decrease" : "increase");
 +            fprintf(fplog,"%s\n",buf);
 +            fprintf(stderr,"%s\n",buf);
 +        }
 +    }
 +}
 +
 +static float dd_vol_min(gmx_domdec_t *dd)
 +{
 +    return dd->comm->load[0].cvol_min*dd->nnodes;
 +}
 +
 +static gmx_bool dd_load_flags(gmx_domdec_t *dd)
 +{
 +    return dd->comm->load[0].flags;
 +}
 +
 +static float dd_f_imbal(gmx_domdec_t *dd)
 +{
 +    return dd->comm->load[0].max*dd->nnodes/dd->comm->load[0].sum - 1;
 +}
 +
 +float dd_pme_f_ratio(gmx_domdec_t *dd)
 +{
 +    if (dd->comm->cycl_n[ddCyclPME] > 0)
 +    {
 +        return dd->comm->load[0].pme/dd->comm->load[0].mdf;
 +    }
 +    else
 +    {
 +        return -1.0;
 +    }
 +}
 +
 +static void dd_print_load(FILE *fplog,gmx_domdec_t *dd,gmx_large_int_t step)
 +{
 +    int flags,d;
 +    char buf[22];
 +    
 +    flags = dd_load_flags(dd);
 +    if (flags)
 +    {
 +        fprintf(fplog,
 +                "DD  load balancing is limited by minimum cell size in dimension");
 +        for(d=0; d<dd->ndim; d++)
 +        {
 +            if (flags & (1<<d))
 +            {
 +                fprintf(fplog," %c",dim2char(dd->dim[d]));
 +            }
 +        }
 +        fprintf(fplog,"\n");
 +    }
 +    fprintf(fplog,"DD  step %s",gmx_step_str(step,buf));
 +    if (dd->comm->bDynLoadBal)
 +    {
 +        fprintf(fplog,"  vol min/aver %5.3f%c",
 +                dd_vol_min(dd),flags ? '!' : ' ');
 +    }
 +    fprintf(fplog," load imb.: force %4.1f%%",dd_f_imbal(dd)*100);
 +    if (dd->comm->cycl_n[ddCyclPME])
 +    {
 +        fprintf(fplog,"  pme mesh/force %5.3f",dd_pme_f_ratio(dd));
 +    }
 +    fprintf(fplog,"\n\n");
 +}
 +
 +static void dd_print_load_verbose(gmx_domdec_t *dd)
 +{
 +    if (dd->comm->bDynLoadBal)
 +    {
 +        fprintf(stderr,"vol %4.2f%c ",
 +                dd_vol_min(dd),dd_load_flags(dd) ? '!' : ' ');
 +    }
 +    fprintf(stderr,"imb F %2d%% ",(int)(dd_f_imbal(dd)*100+0.5));
 +    if (dd->comm->cycl_n[ddCyclPME])
 +    {
 +        fprintf(stderr,"pme/F %4.2f ",dd_pme_f_ratio(dd));
 +    }
 +}
 +
#ifdef GMX_MPI
/* Create the MPI communicator for the row of DD cells along dimension
 * dim_ind that contains grid location loc, and allocate the load and
 * dynamic-load-balancing buffers on the row root.
 *
 * MPI_Comm_split is collective over dd->mpi_comm_all: every rank must
 * call it, ranks outside this row pass MPI_UNDEFINED and receive no
 * communicator.
 */
static void make_load_communicator(gmx_domdec_t *dd, int dim_ind,ivec loc)
{
    MPI_Comm  c_row;
    int  dim, i, rank;
    ivec loc_c;
    gmx_domdec_root_t *root;
    gmx_bool bPartOfGroup = FALSE;
    
    dim = dd->dim[dim_ind];
    copy_ivec(loc,loc_c);
    /* Scan the row to determine whether our rank belongs to it */
    for(i=0; i<dd->nc[dim]; i++)
    {
        loc_c[dim] = i;
        rank = dd_index(dd->nc,loc_c);
        if (rank == dd->rank)
        {
            /* This process is part of the group */
            bPartOfGroup = TRUE;
        }
    }
    MPI_Comm_split(dd->mpi_comm_all, bPartOfGroup?0:MPI_UNDEFINED, dd->rank,
                   &c_row);
    if (bPartOfGroup)
    {
        dd->comm->mpi_comm_load[dim_ind] = c_row;
        if (dd->comm->eDLB != edlbNO)
        {
            if (dd->ci[dim] == dd->master_ci[dim])
            {
                /* This is the root process of this row */
                snew(dd->comm->root[dim_ind],1);
                root = dd->comm->root[dim_ind];
                /* Cell boundary arrays for dynamic load balancing */
                snew(root->cell_f,DD_CELL_F_SIZE(dd,dim_ind));
                snew(root->old_cell_f,dd->nc[dim]+1);
                snew(root->bCellMin,dd->nc[dim]);
                if (dim_ind > 0)
                {
                    /* Bounds coupling this dimension to the outer ones */
                    snew(root->cell_f_max0,dd->nc[dim]);
                    snew(root->cell_f_min1,dd->nc[dim]);
                    snew(root->bound_min,dd->nc[dim]);
                    snew(root->bound_max,dd->nc[dim]);
                }
                snew(root->buf_ncd,dd->nc[dim]);
            }
            else
            {
                /* This is not a root process, we only need to receive cell_f */
                snew(dd->comm->cell_f_row,DD_CELL_F_SIZE(dd,dim_ind));
            }
        }
        if (dd->ci[dim] == dd->master_ci[dim])
        {
            /* The row root also gathers the load records of the row */
            snew(dd->comm->load[dim_ind].load,dd->nc[dim]*DD_NLOAD_MAX);
        }
    }
}
#endif
 +
 +static void make_load_communicators(gmx_domdec_t *dd)
 +{
 +#ifdef GMX_MPI
 +  int  dim0,dim1,i,j;
 +  ivec loc;
 +
 +  if (debug)
 +    fprintf(debug,"Making load communicators\n");
 +
 +  snew(dd->comm->load,dd->ndim);
 +  snew(dd->comm->mpi_comm_load,dd->ndim);
 +  
 +  clear_ivec(loc);
 +  make_load_communicator(dd,0,loc);
 +  if (dd->ndim > 1) {
 +    dim0 = dd->dim[0];
 +    for(i=0; i<dd->nc[dim0]; i++) {
 +      loc[dim0] = i;
 +      make_load_communicator(dd,1,loc);
 +    }
 +  }
 +  if (dd->ndim > 2) {
 +    dim0 = dd->dim[0];
 +    for(i=0; i<dd->nc[dim0]; i++) {
 +      loc[dim0] = i;
 +      dim1 = dd->dim[1];
 +      for(j=0; j<dd->nc[dim1]; j++) {
 +        loc[dim1] = j;
 +        make_load_communicator(dd,2,loc);
 +      }
 +    }
 +  }
 +
 +  if (debug)
 +    fprintf(debug,"Finished making load communicators\n");
 +#endif
 +}
 +
 +void setup_dd_grid(FILE *fplog,gmx_domdec_t *dd)
 +{
 +    gmx_bool bZYX;
 +    int  d,dim,i,j,m;
 +    ivec tmp,s;
 +    int  nzone,nzonep;
 +    ivec dd_zp[DD_MAXIZONE];
 +    gmx_domdec_zones_t *zones;
 +    gmx_domdec_ns_ranges_t *izone;
 +    
 +    for(d=0; d<dd->ndim; d++)
 +    {
 +        dim = dd->dim[d];
 +        copy_ivec(dd->ci,tmp);
 +        tmp[dim] = (tmp[dim] + 1) % dd->nc[dim];
 +        dd->neighbor[d][0] = ddcoord2ddnodeid(dd,tmp);
 +        copy_ivec(dd->ci,tmp);
 +        tmp[dim] = (tmp[dim] - 1 + dd->nc[dim]) % dd->nc[dim];
 +        dd->neighbor[d][1] = ddcoord2ddnodeid(dd,tmp);
 +        if (debug)
 +        {
 +            fprintf(debug,"DD rank %d neighbor ranks in dir %d are + %d - %d\n",
 +                    dd->rank,dim,
 +                    dd->neighbor[d][0],
 +                    dd->neighbor[d][1]);
 +        }
 +    }
 +    
 +    if (DDMASTER(dd))
 +    {
 +        fprintf(stderr,"Making %dD domain decomposition %d x %d x %d\n",
 +          dd->ndim,dd->nc[XX],dd->nc[YY],dd->nc[ZZ]);
 +    }
 +    if (fplog)
 +    {
 +        fprintf(fplog,"\nMaking %dD domain decomposition grid %d x %d x %d, home cell index %d %d %d\n\n",
 +                dd->ndim,
 +                dd->nc[XX],dd->nc[YY],dd->nc[ZZ],
 +                dd->ci[XX],dd->ci[YY],dd->ci[ZZ]);
 +    }
 +    switch (dd->ndim)
 +    {
 +    case 3:
 +        nzone  = dd_z3n;
 +        nzonep = dd_zp3n;
 +        for(i=0; i<nzonep; i++)
 +        {
 +            copy_ivec(dd_zp3[i],dd_zp[i]);
 +        }
 +        break;
 +    case 2:
 +        nzone  = dd_z2n;
 +        nzonep = dd_zp2n;
 +        for(i=0; i<nzonep; i++)
 +        {
 +            copy_ivec(dd_zp2[i],dd_zp[i]);
 +        }
 +        break;
 +    case 1:
 +        nzone  = dd_z1n;
 +        nzonep = dd_zp1n;
 +        for(i=0; i<nzonep; i++)
 +        {
 +            copy_ivec(dd_zp1[i],dd_zp[i]);
 +        }
 +        break;
 +    default:
 +        gmx_fatal(FARGS,"Can only do 1, 2 or 3D domain decomposition");
 +        nzone = 0;
 +        nzonep = 0;
 +    }
 +
 +    zones = &dd->comm->zones;
 +
 +    for(i=0; i<nzone; i++)
 +    {
 +        m = 0;
 +        clear_ivec(zones->shift[i]);
 +        for(d=0; d<dd->ndim; d++)
 +        {
 +            zones->shift[i][dd->dim[d]] = dd_zo[i][m++];
 +        }
 +    }
 +    
 +    zones->n = nzone;
 +    for(i=0; i<nzone; i++)
 +    {
 +        for(d=0; d<DIM; d++)
 +        {
 +            s[d] = dd->ci[d] - zones->shift[i][d];
 +            if (s[d] < 0)
 +            {
 +                s[d] += dd->nc[d];
 +            }
 +            else if (s[d] >= dd->nc[d])
 +            {
 +                s[d] -= dd->nc[d];
 +            }
 +        }
 +    }
 +    zones->nizone = nzonep;
 +    for(i=0; i<zones->nizone; i++)
 +    {
 +        if (dd_zp[i][0] != i)
 +        {
 +            gmx_fatal(FARGS,"Internal inconsistency in the dd grid setup");
 +        }
 +        izone = &zones->izone[i];
 +        izone->j0 = dd_zp[i][1];
 +        izone->j1 = dd_zp[i][2];
 +        for(dim=0; dim<DIM; dim++)
 +        {
 +            if (dd->nc[dim] == 1)
 +            {
 +                /* All shifts should be allowed */
 +                izone->shift0[dim] = -1;
 +                izone->shift1[dim] = 1;
 +            }
 +            else
 +            {
 +                /*
 +                  izone->shift0[d] = 0;
 +                  izone->shift1[d] = 0;
 +                  for(j=izone->j0; j<izone->j1; j++) {
 +                  if (dd->shift[j][d] > dd->shift[i][d])
 +                  izone->shift0[d] = -1;
 +                  if (dd->shift[j][d] < dd->shift[i][d])
 +                  izone->shift1[d] = 1;
 +                  }
 +                */
 +                
 +                int shift_diff;
 +                
 +                /* Assume the shift are not more than 1 cell */
 +                izone->shift0[dim] = 1;
 +                izone->shift1[dim] = -1;
 +                for(j=izone->j0; j<izone->j1; j++)
 +                {
 +                    shift_diff = zones->shift[j][dim] - zones->shift[i][dim];
 +                    if (shift_diff < izone->shift0[dim])
 +                    {
 +                        izone->shift0[dim] = shift_diff;
 +                    }
 +                    if (shift_diff > izone->shift1[dim])
 +                    {
 +                        izone->shift1[dim] = shift_diff;
 +                    }
 +                }
 +            }
 +        }
 +    }
 +    
 +    if (dd->comm->eDLB != edlbNO)
 +    {
 +        snew(dd->comm->root,dd->ndim);
 +    }
 +    
 +    if (dd->comm->bRecordLoad)
 +    {
 +        make_load_communicators(dd);
 +    }
 +}
 +
 +/* Set up the particle-particle (PP) MPI communicator, optionally as a
 + * Cartesian communicator matching the DD grid, and determine this node's
 + * DD rank, grid coordinates and the rank of the DD master node.
 + * With reorder!=0, MPI_Cart_create is allowed to renumber the ranks.
 + */
 +static void make_pp_communicator(FILE *fplog,t_commrec *cr,int reorder)
 +{
 +    gmx_domdec_t *dd;
 +    gmx_domdec_comm_t *comm;
 +    int  i,rank,*buf;
 +    ivec periods;
 +#ifdef GMX_MPI
 +    MPI_Comm comm_cart;
 +#endif
 +    
 +    dd = cr->dd;
 +    comm = dd->comm;
 +    
 +#ifdef GMX_MPI
 +    if (comm->bCartesianPP)
 +    {
 +        /* Set up cartesian communication for the particle-particle part */
 +        if (fplog)
 +        {
 +            fprintf(fplog,"Will use a Cartesian communicator: %d x %d x %d\n",
 +                    dd->nc[XX],dd->nc[YY],dd->nc[ZZ]);
 +        }
 +        
 +        /* The DD grid is periodic in all dimensions */
 +        for(i=0; i<DIM; i++)
 +        {
 +            periods[i] = TRUE;
 +        }
 +        MPI_Cart_create(cr->mpi_comm_mygroup,DIM,dd->nc,periods,reorder,
 +                        &comm_cart);
 +        /* We overwrite the old communicator with the new cartesian one */
 +        cr->mpi_comm_mygroup = comm_cart;
 +    }
 +    
 +    dd->mpi_comm_all = cr->mpi_comm_mygroup;
 +    MPI_Comm_rank(dd->mpi_comm_all,&dd->rank);
 +    
 +    if (comm->bCartesianPP_PME)
 +    {
 +        /* Since we want to use the original cartesian setup for sim,
 +         * and not the one after split, we need to make an index.
 +         */
 +        snew(comm->ddindex2ddnodeid,dd->nnodes);
 +        comm->ddindex2ddnodeid[dd_index(dd->nc,dd->ci)] = dd->rank;
 +        gmx_sumi(dd->nnodes,comm->ddindex2ddnodeid,cr);
 +        /* Get the rank of the DD master,
 +         * above we made sure that the master node is a PP node.
 +         */
 +        if (MASTER(cr))
 +        {
 +            rank = dd->rank;
 +        }
 +        else
 +        {
 +            rank = 0;
 +        }
 +        /* Only the master contributes a non-zero term, so the sum-reduce
 +         * distributes the master's DD rank to all nodes.
 +         */
 +        MPI_Allreduce(&rank,&dd->masterrank,1,MPI_INT,MPI_SUM,dd->mpi_comm_all);
 +    }
 +    else if (comm->bCartesianPP)
 +    {
 +        if (cr->npmenodes == 0)
 +        {
 +            /* The PP communicator is also
 +             * the communicator for this simulation
 +             */
 +            cr->mpi_comm_mysim = cr->mpi_comm_mygroup;
 +        }
 +        cr->nodeid = dd->rank;
 +        
 +        MPI_Cart_coords(dd->mpi_comm_all,dd->rank,DIM,dd->ci);
 +        
 +        /* We need to make an index to go from the coordinates
 +         * to the nodeid of this simulation.
 +         */
 +        snew(comm->ddindex2simnodeid,dd->nnodes);
 +        snew(buf,dd->nnodes);
 +        if (cr->duty & DUTY_PP)
 +        {
 +            buf[dd_index(dd->nc,dd->ci)] = cr->sim_nodeid;
 +        }
 +        /* Communicate the ddindex to simulation nodeid index */
 +        MPI_Allreduce(buf,comm->ddindex2simnodeid,dd->nnodes,MPI_INT,MPI_SUM,
 +                      cr->mpi_comm_mysim);
 +        sfree(buf);
 +        
 +        /* Determine the master coordinates and rank.
 +         * The DD master should be the same node as the master of this sim.
 +         */
 +        for(i=0; i<dd->nnodes; i++)
 +        {
 +            if (comm->ddindex2simnodeid[i] == 0)
 +            {
 +                ddindex2xyz(dd->nc,i,dd->master_ci);
 +                MPI_Cart_rank(dd->mpi_comm_all,dd->master_ci,&dd->masterrank);
 +            }
 +        }
 +        if (debug)
 +        {
 +            fprintf(debug,"The master rank is %d\n",dd->masterrank);
 +        }
 +    }
 +    else
 +    {
 +        /* No Cartesian communicators */
 +        /* We use the rank in dd->comm->all as DD index */
 +        ddindex2xyz(dd->nc,dd->rank,dd->ci);
 +        /* The simulation master nodeid is 0, so the DD master rank is also 0 */
 +        dd->masterrank = 0;
 +        clear_ivec(dd->master_ci);
 +    }
 +#endif
 +  
 +    if (fplog)
 +    {
 +        fprintf(fplog,
 +                "Domain decomposition nodeid %d, coordinates %d %d %d\n\n",
 +                dd->rank,dd->ci[XX],dd->ci[YY],dd->ci[ZZ]);
 +    }
 +    if (debug)
 +    {
 +        fprintf(debug,
 +                "Domain decomposition nodeid %d, coordinates %d %d %d\n\n",
 +                dd->rank,dd->ci[XX],dd->ci[YY],dd->ci[ZZ]);
 +    }
 +}
 +
 +/* On PME-only nodes: build the ddindex -> simulation-nodeid lookup table
 + * by participating in the same MPI_Allreduce that the PP nodes perform in
 + * make_pp_communicator (contributing only zeros, since this node has no
 + * DD grid cell of its own).
 + */
 +static void receive_ddindex2simnodeid(t_commrec *cr)
 +{
 +    gmx_domdec_t *dd;
 +    
 +    gmx_domdec_comm_t *comm;
 +    int  *buf;
 +    
 +    dd = cr->dd;
 +    comm = dd->comm;
 +    
 +#ifdef GMX_MPI
 +    if (!comm->bCartesianPP_PME && comm->bCartesianPP)
 +    {
 +        snew(comm->ddindex2simnodeid,dd->nnodes);
 +        snew(buf,dd->nnodes);
 +        if (cr->duty & DUTY_PP)
 +        {
 +            buf[dd_index(dd->nc,dd->ci)] = cr->sim_nodeid;
 +        }
 +/* NOTE(review): this inner #ifdef GMX_MPI is redundant - we are already
 + * inside an #ifdef GMX_MPI block; harmless, but could be removed. */
 +#ifdef GMX_MPI
 +        /* Communicate the ddindex to simulation nodeid index */
 +        MPI_Allreduce(buf,comm->ddindex2simnodeid,dd->nnodes,MPI_INT,MPI_SUM,
 +                      cr->mpi_comm_mysim);
 +#endif
 +        sfree(buf);
 +    }
 +#endif
 +}
 +
 +/* Allocate the master-only data structure used for distributing and
 + * collecting the global state: per-node charge-group counts/indices,
 + * the global cg array (ncg entries), atom counts, an integer work buffer
 + * and per-dimension cell boundary arrays.  Returns the new struct.
 + */
 +static gmx_domdec_master_t *init_gmx_domdec_master_t(gmx_domdec_t *dd,
 +                                                     int ncg,int natoms)
 +{
 +    gmx_domdec_master_t *ma;
 +    int i;
 +
 +    snew(ma,1);
 +    
 +    snew(ma->ncg,dd->nnodes);
 +    snew(ma->index,dd->nnodes+1);
 +    snew(ma->cg,ncg);
 +    snew(ma->nat,dd->nnodes);
 +    snew(ma->ibuf,dd->nnodes*2);
 +    snew(ma->cell_x,DIM);
 +    for(i=0; i<DIM; i++)
 +    {
 +        snew(ma->cell_x[i],dd->nc[i]+1);
 +    }
 +
 +    /* For few nodes point-to-point send/recv is used and no vector
 +     * buffer is needed; for many nodes a natoms-sized buffer is used.
 +     */
 +    if (dd->nnodes <= GMX_DD_NNODES_SENDRECV)
 +    {
 +        ma->vbuf = NULL;
 +    }
 +    else
 +    {
 +        snew(ma->vbuf,natoms);
 +    }
 +
 +    return ma;
 +}
 +
 +/* Split the simulation communicator into a PP group and a PME-only group.
 + * If the PME node count divides evenly into an extra Cartesian slab (in y
 + * or z), a combined PP+PME Cartesian communicator is created; otherwise
 + * the split is done by rank order according to dd_node_order.
 + * Sets cr->duty (DUTY_PP or DUTY_PME) and cr->mpi_comm_mygroup.
 + */
 +static void split_communicator(FILE *fplog,t_commrec *cr,int dd_node_order,
 +                               int reorder)
 +{
 +    gmx_domdec_t *dd;
 +    gmx_domdec_comm_t *comm;
 +    int  i,rank;
 +    gmx_bool bDiv[DIM];
 +    ivec periods;
 +#ifdef GMX_MPI
 +    MPI_Comm comm_cart;
 +#endif
 +    
 +    dd = cr->dd;
 +    comm = dd->comm;
 +    
 +    if (comm->bCartesianPP)
 +    {
 +        /* Only y and z are candidates for stacking the PME nodes;
 +         * bDiv[XX] is intentionally left unset and never read.
 +         */
 +        for(i=1; i<DIM; i++)
 +        {
 +            bDiv[i] = ((cr->npmenodes*dd->nc[i]) % (dd->nnodes) == 0);
 +        }
 +        if (bDiv[YY] || bDiv[ZZ])
 +        {
 +            comm->bCartesianPP_PME = TRUE;
 +            /* If we have 2D PME decomposition, which is always in x+y,
 +             * we stack the PME only nodes in z.
 +             * Otherwise we choose the direction that provides the thinnest slab
 +             * of PME only nodes as this will have the least effect
 +             * on the PP communication.
 +             * But for the PME communication the opposite might be better.
 +             */
 +            if (bDiv[ZZ] && (comm->npmenodes_y > 1 ||
 +                             !bDiv[YY] ||
 +                             dd->nc[YY] > dd->nc[ZZ]))
 +            {
 +                comm->cartpmedim = ZZ;
 +            }
 +            else
 +            {
 +                comm->cartpmedim = YY;
 +            }
 +            /* Extend the grid in the chosen dimension to hold the PME nodes */
 +            comm->ntot[comm->cartpmedim]
 +                += (cr->npmenodes*dd->nc[comm->cartpmedim])/dd->nnodes;
 +        }
 +        else if (fplog)
 +        {
 +            fprintf(fplog,"#pmenodes (%d) is not a multiple of nx*ny (%d*%d) or nx*nz (%d*%d)\n",cr->npmenodes,dd->nc[XX],dd->nc[YY],dd->nc[XX],dd->nc[ZZ]);
 +            fprintf(fplog,
 +                    "Will not use a Cartesian communicator for PP <-> PME\n\n");
 +        }
 +    }
 +    
 +#ifdef GMX_MPI
 +    if (comm->bCartesianPP_PME)
 +    {
 +        if (fplog)
 +        {
 +            fprintf(fplog,"Will use a Cartesian communicator for PP <-> PME: %d x %d x %d\n",comm->ntot[XX],comm->ntot[YY],comm->ntot[ZZ]);
 +        }
 +        
 +        for(i=0; i<DIM; i++)
 +        {
 +            periods[i] = TRUE;
 +        }
 +        MPI_Cart_create(cr->mpi_comm_mysim,DIM,comm->ntot,periods,reorder,
 +                        &comm_cart);
 +        
 +        MPI_Comm_rank(comm_cart,&rank);
 +        if (MASTERNODE(cr) && rank != 0)
 +        {
 +            gmx_fatal(FARGS,"MPI rank 0 was renumbered by MPI_Cart_create, we do not allow this");
 +        }
 +        
 +        /* With this assigment we loose the link to the original communicator
 +         * which will usually be MPI_COMM_WORLD, unless have multisim.
 +         */
 +        cr->mpi_comm_mysim = comm_cart;
 +        cr->sim_nodeid = rank;
 +        
 +        MPI_Cart_coords(cr->mpi_comm_mysim,cr->sim_nodeid,DIM,dd->ci);
 +        
 +        if (fplog)
 +        {
 +            fprintf(fplog,"Cartesian nodeid %d, coordinates %d %d %d\n\n",
 +                    cr->sim_nodeid,dd->ci[XX],dd->ci[YY],dd->ci[ZZ]);
 +        }
 +        
 +        /* Nodes inside the original DD grid do PP; the extra slab does PME */
 +        if (dd->ci[comm->cartpmedim] < dd->nc[comm->cartpmedim])
 +        {
 +            cr->duty = DUTY_PP;
 +        }
 +        if (cr->npmenodes == 0 ||
 +            dd->ci[comm->cartpmedim] >= dd->nc[comm->cartpmedim])
 +        {
 +            cr->duty = DUTY_PME;
 +        }
 +        
 +        /* Split the sim communicator into PP and PME only nodes */
 +        MPI_Comm_split(cr->mpi_comm_mysim,
 +                       cr->duty,
 +                       dd_index(comm->ntot,dd->ci),
 +                       &cr->mpi_comm_mygroup);
 +    }
 +    else
 +    {
 +        switch (dd_node_order)
 +        {
 +        case ddnoPP_PME:
 +            if (fplog)
 +            {
 +                fprintf(fplog,"Order of the nodes: PP first, PME last\n");
 +            }
 +            break;
 +        case ddnoINTERLEAVE:
 +            /* Interleave the PP-only and PME-only nodes,
 +             * as on clusters with dual-core machines this will double
 +             * the communication bandwidth of the PME processes
 +             * and thus speed up the PP <-> PME and inter PME communication.
 +             */
 +            if (fplog)
 +            {
 +                fprintf(fplog,"Interleaving PP and PME nodes\n");
 +            }
 +            comm->pmenodes = dd_pmenodes(cr);
 +            break;
 +        case ddnoCARTESIAN:
 +            break;
 +        default:
 +            gmx_fatal(FARGS,"Unknown dd_node_order=%d",dd_node_order);
 +        }
 +    
 +        /* dd_simnode2pmenode returns -1 for PME-only nodes */
 +        if (dd_simnode2pmenode(cr,cr->sim_nodeid) == -1)
 +        {
 +            cr->duty = DUTY_PME;
 +        }
 +        else
 +        {
 +            cr->duty = DUTY_PP;
 +        }
 +        
 +        /* Split the sim communicator into PP and PME only nodes */
 +        MPI_Comm_split(cr->mpi_comm_mysim,
 +                       cr->duty,
 +                       cr->nodeid,
 +                       &cr->mpi_comm_mygroup);
 +        MPI_Comm_rank(cr->mpi_comm_mygroup,&cr->nodeid);
 +    }
 +#endif
 +
 +    if (fplog)
 +    {
 +        fprintf(fplog,"This is a %s only node\n\n",
 +                (cr->duty & DUTY_PP) ? "particle-particle" : "PME-mesh");
 +    }
 +}
 +
 +/* Top-level entry point for communicator setup: optionally split off the
 + * PME-only nodes, create the PP communicator, connect PP nodes to their
 + * PME node, and allocate the master data structure on the DD master.
 + */
 +void make_dd_communicators(FILE *fplog,t_commrec *cr,int dd_node_order)
 +{
 +    gmx_domdec_t *dd;
 +    gmx_domdec_comm_t *comm;
 +    int CartReorder;
 +    
 +    dd = cr->dd;
 +    comm = dd->comm;
 +    
 +    copy_ivec(dd->nc,comm->ntot);
 +    
 +    comm->bCartesianPP = (dd_node_order == ddnoCARTESIAN);
 +    comm->bCartesianPP_PME = FALSE;
 +    
 +    /* Reorder the nodes by default. This might change the MPI ranks.
 +     * Real reordering is only supported on very few architectures,
 +     * Blue Gene is one of them.
 +     */
 +    CartReorder = (getenv("GMX_NO_CART_REORDER") == NULL);
 +    
 +    if (cr->npmenodes > 0)
 +    {
 +        /* Split the communicator into a PP and PME part */
 +        split_communicator(fplog,cr,dd_node_order,CartReorder);
 +        if (comm->bCartesianPP_PME)
 +        {
 +            /* We (possibly) reordered the nodes in split_communicator,
 +             * so it is no longer required in make_pp_communicator.
 +             */
 +            CartReorder = FALSE;
 +        }
 +    }
 +    else
 +    {
 +        /* All nodes do PP and PME */
 +#ifdef GMX_MPI    
 +        /* We do not require separate communicators */
 +        cr->mpi_comm_mygroup = cr->mpi_comm_mysim;
 +#endif
 +    }
 +    
 +    if (cr->duty & DUTY_PP)
 +    {
 +        /* Copy or make a new PP communicator */
 +        make_pp_communicator(fplog,cr,CartReorder);
 +    }
 +    else
 +    {
 +        receive_ddindex2simnodeid(cr);
 +    }
 +    
 +    if (!(cr->duty & DUTY_PME))
 +    {
 +        /* Set up the commnuication to our PME node */
 +        dd->pme_nodeid = dd_simnode2pmenode(cr,cr->sim_nodeid);
 +        dd->pme_receive_vir_ener = receive_vir_ener(cr);
 +        if (debug)
 +        {
 +            fprintf(debug,"My pme_nodeid %d receive ener %d\n",
 +                    dd->pme_nodeid,dd->pme_receive_vir_ener);
 +        }
 +    }
 +    else
 +    {
 +        dd->pme_nodeid = -1;
 +    }
 +
 +    /* Only the DD master holds the global charge-group/atom bookkeeping */
 +    if (DDMASTER(dd))
 +    {
 +        dd->ma = init_gmx_domdec_master_t(dd,
 +                                          comm->cgs_gl.nr,
 +                                          comm->cgs_gl.index[comm->cgs_gl.nr]);
 +    }
 +}
 +
 +/* Parse a user-supplied string of nc relative cell sizes for static load
 + * balancing in direction dir, normalize them to fractions that sum to 1,
 + * and return a newly allocated array (NULL when nc<=1 or no string given).
 + */
 +static real *get_slb_frac(FILE *fplog,const char *dir,int nc,const char *size_string)
 +{
 +    real *slb_frac,tot;
 +    int  i,n;
 +    double dbl;
 +    
 +    slb_frac = NULL;
 +    if (nc > 1 && size_string != NULL)
 +    {
 +        if (fplog)
 +        {
 +            fprintf(fplog,"Using static load balancing for the %s direction\n",
 +                    dir);
 +        }
 +        snew(slb_frac,nc);
 +        tot = 0;
 +        for (i=0; i<nc; i++)
 +        {
 +            dbl = 0;
 +            /* %n records how many characters were consumed, so we can
 +             * advance size_string past the entry just read.
 +             */
 +            sscanf(size_string,"%lf%n",&dbl,&n);
 +            if (dbl == 0)
 +            {
 +                gmx_fatal(FARGS,"Incorrect or not enough DD cell size entries for direction %s: '%s'",dir,size_string);
 +            }
 +            slb_frac[i] = dbl;
 +            size_string += n;
 +            tot += slb_frac[i];
 +        }
 +        /* Normalize */
 +        if (fplog)
 +        {
 +            fprintf(fplog,"Relative cell sizes:");
 +        }
 +        for (i=0; i<nc; i++)
 +        {
 +            slb_frac[i] /= tot;
 +            if (fplog)
 +            {
 +                fprintf(fplog," %5.3f",slb_frac[i]);
 +            }
 +        }
 +        if (fplog)
 +        {
 +            fprintf(fplog,"\n");
 +        }
 +    }
 +    
 +    return slb_frac;
 +}
 +
 +/* Count the bonded interactions involving more than two atoms in the
 + * whole topology (used to decide if multi-body bonded communication
 + * between cells is required).
 + */
 +static int multi_body_bondeds_count(gmx_mtop_t *mtop)
 +{
 +    int n,nmol,ftype;
 +    gmx_mtop_ilistloop_t iloop;
 +    t_ilist *il;
 +    
 +    n = 0;
 +    iloop = gmx_mtop_ilistloop_init(mtop);
 +    while (gmx_mtop_ilistloop_next(iloop,&il,&nmol))
 +    {
 +        for(ftype=0; ftype<F_NRE; ftype++)
 +        {
 +            if ((interaction_function[ftype].flags & IF_BOND) &&
 +                NRAL(ftype) >  2)
 +            {
 +                /* Each entry is 1 type index plus NRAL(ftype) atom indices */
 +                n += nmol*il[ftype].nr/(1 + NRAL(ftype));
 +            }
 +        }
 +  }
 +
 +  return n;
 +}
 +
 +/* Read an integer tuning value from environment variable env_var;
 + * return def when the variable is unset.  A set but unparsable value
 + * yields 1 (the variable acts as an on/off switch in that case).
 + */
 +static int dd_nst_env(FILE *fplog,const char *env_var,int def)
 +{
 +    char *val;
 +    int  nst;
 +    
 +    nst = def;
 +    val = getenv(env_var);
 +    if (val)
 +    {
 +        if (sscanf(val,"%d",&nst) <= 0)
 +        {
 +            nst = 1;
 +        }
 +        if (fplog)
 +        {
 +            fprintf(fplog,"Found env.var. %s = %s, using value %d\n",
 +                    env_var,val,nst);
 +        }
 +    }
 +    
 +    return nst;
 +}
 +
 +/* Print a warning to stderr (master node only) and to the log file
 + * (every node that has one).
 + */
 +static void dd_warning(t_commrec *cr,FILE *fplog,const char *warn_string)
 +{
 +    if (MASTER(cr))
 +    {
 +        fprintf(stderr,"\n%s\n",warn_string);
 +    }
 +    if (fplog)
 +    {
 +        fprintf(fplog,"\n%s\n",warn_string);
 +    }
 +}
 +
 +/* Abort with a fatal error for input-record settings that domain
 + * decomposition cannot support (screw PBC off the x axis, simple
 + * neighbor search, nstlist=0); warn for angular comm-mode with PBC.
 + */
 +static void check_dd_restrictions(t_commrec *cr,gmx_domdec_t *dd,
 +                                  t_inputrec *ir,FILE *fplog)
 +{
 +    /* Screw PBC only works with decomposition along x alone */
 +    if (ir->ePBC == epbcSCREW &&
 +        (dd->nc[XX] == 1 || dd->nc[YY] > 1 || dd->nc[ZZ] > 1))
 +    {
 +        gmx_fatal(FARGS,"With pbc=%s can only do domain decomposition in the x-direction",epbc_names[ir->ePBC]);
 +    }
 +
 +    if (ir->ns_type == ensSIMPLE)
 +    {
 +        gmx_fatal(FARGS,"Domain decomposition does not support simple neighbor searching, use grid searching or use particle decomposition");
 +    }
 +
 +    if (ir->nstlist == 0)
 +    {
 +        gmx_fatal(FARGS,"Domain decomposition does not work with nstlist=0");
 +    }
 +
 +    if (ir->comm_mode == ecmANGULAR && ir->ePBC != epbcNONE)
 +    {
 +        dd_warning(cr,fplog,"comm-mode angular will give incorrect results when the comm group partially crosses a periodic boundary");
 +    }
 +}
 +
 +/* Return the smallest average cell size over the decomposed dimensions,
 + * taking box skew into account; initialized with the x box size so the
 + * result is well-defined even with ndim==0.
 + */
 +static real average_cellsize_min(gmx_domdec_t *dd,gmx_ddbox_t *ddbox)
 +{
 +    int  di,d;
 +    real r;
 +
 +    r = ddbox->box_size[XX];
 +    for(di=0; di<dd->ndim; di++)
 +    {
 +        d = dd->dim[di];
 +        /* Check using the initial average cell size */
 +        r = min(r,ddbox->box_size[d]*ddbox->skew_fac[d]/dd->nc[d]);
 +    }
 +
 +    return r;
 +}
 +
 +/* Translate the -dlb option string ('a'/'n'/'y') into an edlb* value and
 + * downgrade it where dynamic load balancing is not applicable: reruns,
 + * non-dynamical integrators, missing cycle counters, or (for 'auto')
 + * reproducibility requests.  Returns the effective edlb* setting.
 + */
 +static int check_dlb_support(FILE *fplog,t_commrec *cr,
 +                             const char *dlb_opt,gmx_bool bRecordLoad,
 +                             unsigned long Flags,t_inputrec *ir)
 +{
 +    gmx_domdec_t *dd;
 +    int  eDLB=-1;
 +    char buf[STRLEN];
 +
 +    switch (dlb_opt[0])
 +    {
 +    case 'a': eDLB = edlbAUTO; break;
 +    case 'n': eDLB = edlbNO;   break;
 +    case 'y': eDLB = edlbYES;  break;
 +    default: gmx_incons("Unknown dlb_opt");
 +    }
 +
 +    if (Flags & MD_RERUN)
 +    {
 +        return edlbNO;
 +    }
 +
 +    if (!EI_DYNAMICS(ir->eI))
 +    {
 +        if (eDLB == edlbYES)
 +        {
 +            sprintf(buf,"NOTE: dynamic load balancing is only supported with dynamics, not with integrator '%s'\n",EI(ir->eI));
 +            dd_warning(cr,fplog,buf);
 +        }
 +            
 +        return edlbNO;
 +    }
 +
 +    if (!bRecordLoad)
 +    {
 +        dd_warning(cr,fplog,"NOTE: Cycle counting is not supported on this architecture, will not use dynamic load balancing\n");
 +
 +        return edlbNO;
 +    }
 +
 +    /* With reproducibility requested, 'auto' is turned off and an
 +     * explicit 'yes' only gets a warning.
 +     */
 +    if (Flags & MD_REPRODUCIBLE)
 +    {
 +        switch (eDLB)
 +        {
 +                      case edlbNO: 
 +                              break;
 +                      case edlbAUTO:
 +                              dd_warning(cr,fplog,"NOTE: reproducibility requested, will not use dynamic load balancing\n");
 +                              eDLB = edlbNO;
 +                              break;
 +                      case edlbYES:
 +                              dd_warning(cr,fplog,"WARNING: reproducibility requested with dynamic load balancing, the simulation will NOT be binary reproducible\n");
 +                              break;
 +                      default:
 +                              gmx_fatal(FARGS,"Death horror: undefined case (%d) for load balancing choice",eDLB);
 +                              break;
 +        }
 +    }
 +
 +    return eDLB;
 +}
 +
 +/* Fill dd->dim with the dimensions that are actually decomposed
 + * (nc > 1) and set dd->ndim accordingly.  The order is x,y,z by
 + * default, or z,y,x when GMX_DD_ORDER_ZYX is set in the environment.
 + */
 +static void set_dd_dim(FILE *fplog,gmx_domdec_t *dd)
 +{
 +    int dim;
 +
 +    dd->ndim = 0;
 +    if (getenv("GMX_DD_ORDER_ZYX") != NULL)
 +    {
 +        /* Decomposition order z,y,x */
 +        if (fplog)
 +        {
 +            fprintf(fplog,"Using domain decomposition order z, y, x\n");
 +        }
 +        for(dim=DIM-1; dim>=0; dim--)
 +        {
 +            if (dd->nc[dim] > 1)
 +            {
 +                dd->dim[dd->ndim++] = dim;
 +            }
 +        }
 +    }
 +    else
 +    {
 +        /* Decomposition order x,y,z */
 +        for(dim=0; dim<DIM; dim++)
 +        {
 +            if (dd->nc[dim] > 1)
 +            {
 +                dd->dim[dd->ndim++] = dim;
 +            }
 +        }
 +    }
 +}
 +
 +/* Allocate and zero-initialize a gmx_domdec_comm_t: per-direction
 + * charge-group communication buffers (two pulses per dimension),
 + * the shared rvec buffer, and all load-balancing counters.
 + */
 +static gmx_domdec_comm_t *init_dd_comm()
 +{
 +    gmx_domdec_comm_t *comm;
 +    int  i;
 +
 +    snew(comm,1);
 +    /* One flag/state buffer per direction: DIM dimensions x 2 pulses */
 +    snew(comm->cggl_flag,DIM*2);
 +    snew(comm->cgcm_state,DIM*2);
 +    for(i=0; i<DIM*2; i++)
 +    {
 +        comm->cggl_flag_nalloc[i]  = 0;
 +        comm->cgcm_state_nalloc[i] = 0;
 +    }
 +    
 +    comm->nalloc_int = 0;
 +    comm->buf_int    = NULL;
 +
 +    vec_rvec_init(&comm->vbuf);
 +
 +    comm->n_load_have    = 0;
 +    comm->n_load_collect = 0;
 +
 +    for(i=0; i<ddnatNR-ddnatZONE; i++)
 +    {
 +        comm->sum_nat[i] = 0;
 +    }
 +    comm->ndecomp = 0;
 +    comm->nload   = 0;
 +    comm->load_step = 0;
 +    comm->load_sum  = 0;
 +    comm->load_max  = 0;
 +    clear_ivec(comm->load_lim);
 +    comm->load_mdf  = 0;
 +    comm->load_pme  = 0;
 +
 +    return comm;
 +}
 +
 +gmx_domdec_t *init_domain_decomposition(FILE *fplog,t_commrec *cr,
 +                                        unsigned long Flags,
 +                                        ivec nc,
 +                                        real comm_distance_min,real rconstr,
 +                                        const char *dlb_opt,real dlb_scale,
 +                                        const char *sizex,const char *sizey,const char *sizez,
 +                                        gmx_mtop_t *mtop,t_inputrec *ir,
 +                                        matrix box,rvec *x,
 +                                        gmx_ddbox_t *ddbox,
 +                                        int *npme_x,int *npme_y)
 +{
 +    gmx_domdec_t *dd;
 +    gmx_domdec_comm_t *comm;
 +    int  recload;
 +    int  d,i,j;
 +    real r_2b,r_mb,r_bonded=-1,r_bonded_limit=-1,limit,acs;
 +    gmx_bool bC;
 +    char buf[STRLEN];
 +    
 +    if (fplog)
 +    {
 +        fprintf(fplog,
 +                "\nInitializing Domain Decomposition on %d nodes\n",cr->nnodes);
 +    }
 +    
 +    snew(dd,1);
 +
 +    dd->comm = init_dd_comm();
 +    comm = dd->comm;
 +    snew(comm->cggl_flag,DIM*2);
 +    snew(comm->cgcm_state,DIM*2);
 +
 +    dd->npbcdim   = ePBC2npbcdim(ir->ePBC);
 +    dd->bScrewPBC = (ir->ePBC == epbcSCREW);
 +    
 +    dd->bSendRecv2      = dd_nst_env(fplog,"GMX_DD_SENDRECV2",0);
 +    comm->dlb_scale_lim = dd_nst_env(fplog,"GMX_DLB_MAX",10);
 +    comm->eFlop         = dd_nst_env(fplog,"GMX_DLB_FLOP",0);
 +    recload             = dd_nst_env(fplog,"GMX_DD_LOAD",1);
 +    comm->nstSortCG     = dd_nst_env(fplog,"GMX_DD_SORT",1);
 +    comm->nstDDDump     = dd_nst_env(fplog,"GMX_DD_DUMP",0);
 +    comm->nstDDDumpGrid = dd_nst_env(fplog,"GMX_DD_DUMP_GRID",0);
 +    comm->DD_debug      = dd_nst_env(fplog,"GMX_DD_DEBUG",0);
 +
 +    dd->pme_recv_f_alloc = 0;
 +    dd->pme_recv_f_buf = NULL;
 +
 +    if (dd->bSendRecv2 && fplog)
 +    {
 +        fprintf(fplog,"Will use two sequential MPI_Sendrecv calls instead of two simultaneous non-blocking MPI_Irecv and MPI_Isend pairs for constraint and vsite communication\n");
 +    }
 +    if (comm->eFlop)
 +    {
 +        if (fplog)
 +        {
 +            fprintf(fplog,"Will load balance based on FLOP count\n");
 +        }
 +        if (comm->eFlop > 1)
 +        {
 +            srand(1+cr->nodeid);
 +        }
 +        comm->bRecordLoad = TRUE;
 +    }
 +    else
 +    {
 +        comm->bRecordLoad = (wallcycle_have_counter() && recload > 0);
 +                             
 +    }
 +    
 +    comm->eDLB = check_dlb_support(fplog,cr,dlb_opt,comm->bRecordLoad,Flags,ir);
 +    
 +    comm->bDynLoadBal = (comm->eDLB == edlbYES);
 +    if (fplog)
 +    {
 +        fprintf(fplog,"Dynamic load balancing: %s\n",edlb_names[comm->eDLB]);
 +    }
 +    dd->bGridJump = comm->bDynLoadBal;
 +    
 +    if (comm->nstSortCG)
 +    {
 +        if (fplog)
 +        {
 +            if (comm->nstSortCG == 1)
 +            {
 +                fprintf(fplog,"Will sort the charge groups at every domain (re)decomposition\n");
 +            }
 +            else
 +            {
 +                fprintf(fplog,"Will sort the charge groups every %d steps\n",
 +                        comm->nstSortCG);
 +            }
 +        }
 +        snew(comm->sort,1);
 +    }
 +    else
 +    {
 +        if (fplog)
 +        {
 +            fprintf(fplog,"Will not sort the charge groups\n");
 +        }
 +    }
 +
 +    comm->bCGs = (ncg_mtop(mtop) < mtop->natoms);
 +    
 +    comm->bInterCGBondeds = (ncg_mtop(mtop) > mtop->mols.nr);
 +    if (comm->bInterCGBondeds)
 +    {
 +        comm->bInterCGMultiBody = (multi_body_bondeds_count(mtop) > 0);
 +    }
 +    else
 +    {
 +        comm->bInterCGMultiBody = FALSE;
 +    }
 +    
 +    dd->bInterCGcons    = inter_charge_group_constraints(mtop);
 +    dd->bInterCGsettles = inter_charge_group_settles(mtop);
 +
 +    if (ir->rlistlong == 0)
 +    {
 +        /* Set the cut-off to some very large value,
 +         * so we don't need if statements everywhere in the code.
 +         * We use sqrt, since the cut-off is squared in some places.
 +         */
 +        comm->cutoff   = GMX_CUTOFF_INF;
 +    }
 +    else
 +    {
 +        comm->cutoff   = ir->rlistlong;
 +    }
 +    comm->cutoff_mbody = 0;
 +    
 +    comm->cellsize_limit = 0;
 +    comm->bBondComm = FALSE;
 +
 +    if (comm->bInterCGBondeds)
 +    {
 +        if (comm_distance_min > 0)
 +        {
 +            comm->cutoff_mbody = comm_distance_min;
 +            if (Flags & MD_DDBONDCOMM)
 +            {
 +                comm->bBondComm = (comm->cutoff_mbody > comm->cutoff);
 +            }
 +            else
 +            {
 +                comm->cutoff = max(comm->cutoff,comm->cutoff_mbody);
 +            }
 +            r_bonded_limit = comm->cutoff_mbody;
 +        }
 +        else if (ir->bPeriodicMols)
 +        {
 +            /* Can not easily determine the required cut-off */
 +            dd_warning(cr,fplog,"NOTE: Periodic molecules are present in this system. Because of this, the domain decomposition algorithm cannot easily determine the minimum cell size that it requires for treating bonded interactions. Instead, domain decomposition will assume that half the non-bonded cut-off will be a suitable lower bound.\n");
 +            comm->cutoff_mbody = comm->cutoff/2;
 +            r_bonded_limit = comm->cutoff_mbody;
 +        }
 +        else
 +        {
 +            if (MASTER(cr))
 +            {
 +                dd_bonded_cg_distance(fplog,dd,mtop,ir,x,box,
 +                                      Flags & MD_DDBONDCHECK,&r_2b,&r_mb);
 +            }
 +            gmx_bcast(sizeof(r_2b),&r_2b,cr);
 +            gmx_bcast(sizeof(r_mb),&r_mb,cr);
 +
 +            /* We use an initial margin of 10% for the minimum cell size,
 +             * except when we are just below the non-bonded cut-off.
 +             */
 +            if (Flags & MD_DDBONDCOMM)
 +            {
 +                if (max(r_2b,r_mb) > comm->cutoff)
 +                {
 +                    r_bonded       = max(r_2b,r_mb);
 +                    r_bonded_limit = 1.1*r_bonded;
 +                    comm->bBondComm = TRUE;
 +                }
 +                else
 +                {
 +                    r_bonded       = r_mb;
 +                    r_bonded_limit = min(1.1*r_bonded,comm->cutoff);
 +                }
 +                /* We determine cutoff_mbody later */
 +            }
 +            else
 +            {
 +                /* No special bonded communication,
 +                 * simply increase the DD cut-off.
 +                 */
 +                r_bonded_limit     = 1.1*max(r_2b,r_mb);
 +                comm->cutoff_mbody = r_bonded_limit;
 +                comm->cutoff       = max(comm->cutoff,comm->cutoff_mbody);
 +            }
 +        }
 +        comm->cellsize_limit = max(comm->cellsize_limit,r_bonded_limit);
 +        if (fplog)
 +        {
 +            fprintf(fplog,
 +                    "Minimum cell size due to bonded interactions: %.3f nm\n",
 +                    comm->cellsize_limit);
 +        }
 +    }
 +
 +    if (dd->bInterCGcons && rconstr <= 0)
 +    {
 +        /* There is a cell size limit due to the constraints (P-LINCS) */
 +        rconstr = constr_r_max(fplog,mtop,ir);
 +        if (fplog)
 +        {
 +            fprintf(fplog,
 +                    "Estimated maximum distance required for P-LINCS: %.3f nm\n",
 +                    rconstr);
 +            if (rconstr > comm->cellsize_limit)
 +            {
 +                fprintf(fplog,"This distance will limit the DD cell size, you can override this with -rcon\n");
 +            }
 +        }
 +    }
 +    else if (rconstr > 0 && fplog)
 +    {
 +        /* Here we do not check for dd->bInterCGcons,
 +         * because one can also set a cell size limit for virtual sites only
 +         * and at this point we don't know yet if there are intercg v-sites.
 +         */
 +        fprintf(fplog,
 +                "User supplied maximum distance required for P-LINCS: %.3f nm\n",
 +                rconstr);
 +    }
 +    comm->cellsize_limit = max(comm->cellsize_limit,rconstr);
 +
 +    comm->cgs_gl = gmx_mtop_global_cgs(mtop);
 +
 +    if (nc[XX] > 0)
 +    {
 +        copy_ivec(nc,dd->nc);
 +        set_dd_dim(fplog,dd);
 +        set_ddbox_cr(cr,&dd->nc,ir,box,&comm->cgs_gl,x,ddbox);
 +
 +        if (cr->npmenodes == -1)
 +        {
 +            cr->npmenodes = 0;
 +        }
 +        acs = average_cellsize_min(dd,ddbox);
 +        if (acs < comm->cellsize_limit)
 +        {
 +            if (fplog)
 +            {
 +                fprintf(fplog,"ERROR: The initial cell size (%f) is smaller than the cell size limit (%f)\n",acs,comm->cellsize_limit);
 +            }
 +            gmx_fatal_collective(FARGS,cr,NULL,
 +                                 "The initial cell size (%f) is smaller than the cell size limit (%f), change options -dd, -rdd or -rcon, see the log file for details",
 +                                 acs,comm->cellsize_limit);
 +        }
 +    }
 +    else
 +    {
 +        set_ddbox_cr(cr,NULL,ir,box,&comm->cgs_gl,x,ddbox);
 +
 +        /* We need to choose the optimal DD grid and possibly PME nodes */
 +        limit = dd_choose_grid(fplog,cr,dd,ir,mtop,box,ddbox,
 +                               comm->eDLB!=edlbNO,dlb_scale,
 +                               comm->cellsize_limit,comm->cutoff,
 +                               comm->bInterCGBondeds,comm->bInterCGMultiBody);
 +        
 +        if (dd->nc[XX] == 0)
 +        {
 +            bC = (dd->bInterCGcons && rconstr > r_bonded_limit);
 +            sprintf(buf,"Change the number of nodes or mdrun option %s%s%s",
 +                    !bC ? "-rdd" : "-rcon",
 +                    comm->eDLB!=edlbNO ? " or -dds" : "",
 +                    bC ? " or your LINCS settings" : "");
 +
 +            gmx_fatal_collective(FARGS,cr,NULL,
 +                                 "There is no domain decomposition for %d nodes that is compatible with the given box and a minimum cell size of %g nm\n"
 +                                 "%s\n"
 +                                 "Look in the log file for details on the domain decomposition",
 +                                 cr->nnodes-cr->npmenodes,limit,buf);
 +        }
 +        set_dd_dim(fplog,dd);
 +    }
 +
 +    if (fplog)
 +    {
 +        fprintf(fplog,
 +                "Domain decomposition grid %d x %d x %d, separate PME nodes %d\n",
 +                dd->nc[XX],dd->nc[YY],dd->nc[ZZ],cr->npmenodes);
 +    }
 +    
 +    dd->nnodes = dd->nc[XX]*dd->nc[YY]*dd->nc[ZZ];
 +    if (cr->nnodes - dd->nnodes != cr->npmenodes)
 +    {
 +        gmx_fatal_collective(FARGS,cr,NULL,
 +                             "The size of the domain decomposition grid (%d) does not match the number of nodes (%d). The total number of nodes is %d",
 +                             dd->nnodes,cr->nnodes - cr->npmenodes,cr->nnodes);
 +    }
 +    if (cr->npmenodes > dd->nnodes)
 +    {
 +        gmx_fatal_collective(FARGS,cr,NULL,
 +                             "The number of separate PME nodes (%d) is larger than the number of PP nodes (%d), this is not supported.",cr->npmenodes,dd->nnodes);
 +    }
 +    if (cr->npmenodes > 0)
 +    {
 +        comm->npmenodes = cr->npmenodes;
 +    }
 +    else
 +    {
 +        comm->npmenodes = dd->nnodes;
 +    }
 +
 +    if (EEL_PME(ir->coulombtype))
 +    {
 +        /* The following choices should match those
 +         * in comm_cost_est in domdec_setup.c.
 +         * Note that here the checks have to take into account
 +         * that the decomposition might occur in a different order than xyz
 +         * (for instance through the env.var. GMX_DD_ORDER_ZYX),
 +         * in which case they will not match those in comm_cost_est,
 +         * but since that is mainly for testing purposes that's fine.
 +         */
 +        if (dd->ndim >= 2 && dd->dim[0] == XX && dd->dim[1] == YY &&
 +            comm->npmenodes > dd->nc[XX] && comm->npmenodes % dd->nc[XX] == 0 &&
 +            getenv("GMX_PMEONEDD") == NULL)
 +        {
 +            comm->npmedecompdim = 2;
 +            comm->npmenodes_x   = dd->nc[XX];
 +            comm->npmenodes_y   = comm->npmenodes/comm->npmenodes_x;
 +        }
 +        else
 +        {
 +            /* In case nc is 1 in both x and y we could still choose to
 +             * decompose pme in y instead of x, but we use x for simplicity.
 +             */
 +            comm->npmedecompdim = 1;
 +            if (dd->dim[0] == YY)
 +            {
 +                comm->npmenodes_x = 1;
 +                comm->npmenodes_y = comm->npmenodes;
 +            }
 +            else
 +            {
 +                comm->npmenodes_x = comm->npmenodes;
 +                comm->npmenodes_y = 1;
 +            }
 +        }    
 +        if (fplog)
 +        {
 +            fprintf(fplog,"PME domain decomposition: %d x %d x %d\n",
 +                    comm->npmenodes_x,comm->npmenodes_y,1);
 +        }
 +    }
 +    else
 +    {
 +        comm->npmedecompdim = 0;
 +        comm->npmenodes_x   = 0;
 +        comm->npmenodes_y   = 0;
 +    }
 +    
 +    /* Technically we don't need both of these,
 +     * but it simplifies code not having to recalculate it.
 +     */
 +    *npme_x = comm->npmenodes_x;
 +    *npme_y = comm->npmenodes_y;
 +        
 +    snew(comm->slb_frac,DIM);
 +    if (comm->eDLB == edlbNO)
 +    {
 +        comm->slb_frac[XX] = get_slb_frac(fplog,"x",dd->nc[XX],sizex);
 +        comm->slb_frac[YY] = get_slb_frac(fplog,"y",dd->nc[YY],sizey);
 +        comm->slb_frac[ZZ] = get_slb_frac(fplog,"z",dd->nc[ZZ],sizez);
 +    }
 +
 +    if (comm->bInterCGBondeds && comm->cutoff_mbody == 0)
 +    {
 +        if (comm->bBondComm || comm->eDLB != edlbNO)
 +        {
 +            /* Set the bonded communication distance to halfway
 +             * the minimum and the maximum,
 +             * since the extra communication cost is nearly zero.
 +             */
 +            acs = average_cellsize_min(dd,ddbox);
 +            comm->cutoff_mbody = 0.5*(r_bonded + acs);
 +            if (comm->eDLB != edlbNO)
 +            {
 +                /* Check if this does not limit the scaling */
 +                comm->cutoff_mbody = min(comm->cutoff_mbody,dlb_scale*acs);
 +            }
 +            if (!comm->bBondComm)
 +            {
 +                /* Without bBondComm do not go beyond the n.b. cut-off */
 +                comm->cutoff_mbody = min(comm->cutoff_mbody,comm->cutoff);
 +                if (comm->cellsize_limit >= comm->cutoff)
 +                {
 +                    /* We don't loose a lot of efficieny
 +                     * when increasing it to the n.b. cut-off.
 +                     * It can even be slightly faster, because we need
 +                     * less checks for the communication setup.
 +                     */
 +                    comm->cutoff_mbody = comm->cutoff;
 +                }
 +            }
 +            /* Check if we did not end up below our original limit */
 +            comm->cutoff_mbody = max(comm->cutoff_mbody,r_bonded_limit);
 +
 +            if (comm->cutoff_mbody > comm->cellsize_limit)
 +            {
 +                comm->cellsize_limit = comm->cutoff_mbody;
 +            }
 +        }
 +        /* Without DLB and cutoff_mbody<cutoff, cutoff_mbody is dynamic */
 +    }
 +
 +    if (debug)
 +    {
 +        fprintf(debug,"Bonded atom communication beyond the cut-off: %d\n"
 +                "cellsize limit %f\n",
 +                comm->bBondComm,comm->cellsize_limit);
 +    }
 +    
 +    if (MASTER(cr))
 +    {
 +        check_dd_restrictions(cr,dd,ir,fplog);
 +    }
 +
 +    comm->partition_step = INT_MIN;
 +    dd->ddp_count = 0;
 +
 +    clear_dd_cycle_counts(dd);
 +
 +    return dd;
 +}
 +
 +/* Activate the precomputed DLB limits: copy the per-dimension DLB pulse
 + * counts (np_dlb) and minimum DLB cell sizes into the currently active
 + * fields (np, cellsize_min). The *_dlb values are computed in
 + * set_cell_limits_dlb.
 + */
 +static void set_dlb_limits(gmx_domdec_t *dd)
 +
 +{
 +    int d;
 +
 +    for(d=0; d<dd->ndim; d++)
 +    {
 +        dd->comm->cd[d].np = dd->comm->cd[d].np_dlb;
 +        dd->comm->cellsize_min[dd->dim[d]] =
 +            dd->comm->cellsize_min_dlb[dd->dim[d]];
 +    }
 +}
 +
 +
 +/* Turn on dynamic load balancing at the given step, unless the current
 + * minimum cell size is within 5% of the cell size limit, in which case
 + * DLB is switched from "auto" to permanently "no".
 + * On success the uniform initial cell boundaries are filled in locally,
 + * so no extra communication is needed.
 + */
 +static void turn_on_dlb(FILE *fplog,t_commrec *cr,gmx_large_int_t step)
 +{
 +    gmx_domdec_t *dd;
 +    gmx_domdec_comm_t *comm;
 +    real cellsize_min;
 +    int  d,nc,i;
 +    char buf[STRLEN];
 +    
 +    dd = cr->dd;
 +    comm = dd->comm;
 +    
 +    if (fplog)
 +    {
 +        fprintf(fplog,"At step %s the performance loss due to force load imbalance is %.1f %%\n",gmx_step_str(step,buf),dd_force_imb_perf_loss(dd)*100);
 +    }
 +
 +    /* Find the smallest current cell size over all DD dimensions */
 +    cellsize_min = comm->cellsize_min[dd->dim[0]];
 +    for(d=1; d<dd->ndim; d++)
 +    {
 +        cellsize_min = min(cellsize_min,comm->cellsize_min[dd->dim[d]]);
 +    }
 +
 +    if (cellsize_min < comm->cellsize_limit*1.05)
 +    {
 +        dd_warning(cr,fplog,"NOTE: the minimum cell size is smaller than 1.05 times the cell size limit, will not turn on dynamic load balancing\n");
 +
 +        /* Change DLB from "auto" to "no". */
 +        comm->eDLB = edlbNO;
 +
 +        return;
 +    }
 +
 +    dd_warning(cr,fplog,"NOTE: Turning on dynamic load balancing\n");
 +    comm->bDynLoadBal = TRUE;
 +    dd->bGridJump = TRUE;
 +    
 +    set_dlb_limits(dd);
 +
 +    /* We can set the required cell size info here,
 +     * so we do not need to communicate this.
 +     * The grid is completely uniform.
 +     */
 +    for(d=0; d<dd->ndim; d++)
 +    {
 +        if (comm->root[d])
 +        {
 +            comm->load[d].sum_m = comm->load[d].sum;
 +
 +            /* Evenly spaced cell boundary fractions in [0,1] */
 +            nc = dd->nc[dd->dim[d]];
 +            for(i=0; i<nc; i++)
 +            {
 +                comm->root[d]->cell_f[i]    = i/(real)nc;
 +                if (d > 0)
 +                {
 +                    comm->root[d]->cell_f_max0[i] =  i   /(real)nc;
 +                    comm->root[d]->cell_f_min1[i] = (i+1)/(real)nc;
 +                }
 +            }
 +            comm->root[d]->cell_f[nc] = 1.0;
 +        }
 +    }
 +}
 +
 +static char *init_bLocalCG(gmx_mtop_t *mtop)
 +{
 +    int  ncg,cg;
 +    char *bLocalCG;
 +    
 +    ncg = ncg_mtop(mtop);
 +    snew(bLocalCG,ncg);
 +    for(cg=0; cg<ncg; cg++)
 +    {
 +        bLocalCG[cg] = FALSE;
 +    }
 +
 +    return bLocalCG;
 +}
 +
 +void dd_init_bondeds(FILE *fplog,
 +                     gmx_domdec_t *dd,gmx_mtop_t *mtop,
 +                     gmx_vsite_t *vsite,gmx_constr_t constr,
 +                     t_inputrec *ir,gmx_bool bBCheck,cginfo_mb_t *cginfo_mb)
 +{
 +    gmx_domdec_comm_t *comm;
 +    gmx_bool bBondComm;
 +    int  d;
 +
 +    dd_make_reverse_top(fplog,dd,mtop,vsite,constr,ir,bBCheck);
 +
 +    comm = dd->comm;
 +
 +    if (comm->bBondComm)
 +    {
 +        /* Communicate atoms beyond the cut-off for bonded interactions */
 +        comm = dd->comm;
 +
 +        comm->cglink = make_charge_group_links(mtop,dd,cginfo_mb);
 +
 +        comm->bLocalCG = init_bLocalCG(mtop);
 +    }
 +    else
 +    {
 +        /* Only communicate atoms based on cut-off */
 +        comm->cglink   = NULL;
 +        comm->bLocalCG = NULL;
 +    }
 +}
 +
 +/* Write a summary of the domain decomposition settings to the log:
 + * communication pulse counts, cell sizes/shrink limits, and the maximum
 + * allowed distances for the various interaction classes.
 + * Called once with the initial settings and, for eDLB==auto, a second
 + * time with the would-be DLB settings. No-op when fplog is NULL.
 + */
 +static void print_dd_settings(FILE *fplog,gmx_domdec_t *dd,
 +                              t_inputrec *ir,
 +                              gmx_bool bDynLoadBal,real dlb_scale,
 +                              gmx_ddbox_t *ddbox)
 +{
 +    gmx_domdec_comm_t *comm;
 +    int  d;
 +    ivec np;
 +    real limit,shrink;
 +    char buf[64];
 +
 +    if (fplog == NULL)
 +    {
 +        return;
 +    }
 +
 +    comm = dd->comm;
 +
 +    if (bDynLoadBal)
 +    {
 +        fprintf(fplog,"The maximum number of communication pulses is:");
 +        for(d=0; d<dd->ndim; d++)
 +        {
 +            fprintf(fplog," %c %d",dim2char(dd->dim[d]),comm->cd[d].np_dlb);
 +        }
 +        fprintf(fplog,"\n");
 +        fprintf(fplog,"The minimum size for domain decomposition cells is %.3f nm\n",comm->cellsize_limit);
 +        fprintf(fplog,"The requested allowed shrink of DD cells (option -dds) is: %.2f\n",dlb_scale);
 +        fprintf(fplog,"The allowed shrink of domain decomposition cells is:");
 +        for(d=0; d<DIM; d++)
 +        {
 +            if (dd->nc[d] > 1)
 +            {
 +                /* With 2 cells in a non-pbc dimension no shrink limit applies */
 +                if (d >= ddbox->npbcdim && dd->nc[d] == 2)
 +                {
 +                    shrink = 0;
 +                }
 +                else
 +                {
 +                    shrink =
 +                        comm->cellsize_min_dlb[d]/
 +                        (ddbox->box_size[d]*ddbox->skew_fac[d]/dd->nc[d]);
 +                }
 +                fprintf(fplog," %c %.2f",dim2char(d),shrink);
 +            }
 +        }
 +        fprintf(fplog,"\n");
 +    }
 +    else
 +    {
 +        set_dd_cell_sizes_slb(dd,ddbox,FALSE,np);
 +        fprintf(fplog,"The initial number of communication pulses is:");
 +        for(d=0; d<dd->ndim; d++)
 +        {
 +            fprintf(fplog," %c %d",dim2char(dd->dim[d]),np[dd->dim[d]]);
 +        }
 +        fprintf(fplog,"\n");
 +        fprintf(fplog,"The initial domain decomposition cell size is:");
 +        for(d=0; d<DIM; d++) {
 +            if (dd->nc[d] > 1)
 +            {
 +                fprintf(fplog," %c %.2f nm",
 +                        dim2char(d),dd->comm->cellsize_min[d]);
 +            }
 +        }
 +        fprintf(fplog,"\n\n");
 +    }
 +    
 +    if (comm->bInterCGBondeds || dd->vsite_comm || dd->constraint_comm)
 +    {
 +        fprintf(fplog,"The maximum allowed distance for charge groups involved in interactions is:\n");
 +        fprintf(fplog,"%40s  %-7s %6.3f nm\n",
 +                "non-bonded interactions","",comm->cutoff);
 +
 +        /* "limit" is the smallest guaranteed cell size, used as the bound
 +         * for multi-body, vsite and constraint distances below.
 +         */
 +        if (bDynLoadBal)
 +        {
 +            limit = dd->comm->cellsize_limit;
 +        }
 +        else
 +        {
 +            if (dynamic_dd_box(ddbox,ir))
 +            {
 +                fprintf(fplog,"(the following are initial values, they could change due to box deformation)\n");
 +            }
 +            limit = dd->comm->cellsize_min[XX];
 +            for(d=1; d<DIM; d++)
 +            {
 +                limit = min(limit,dd->comm->cellsize_min[d]);
 +            }
 +        }
 +
 +        if (comm->bInterCGBondeds)
 +        {
 +            fprintf(fplog,"%40s  %-7s %6.3f nm\n",
 +                    "two-body bonded interactions","(-rdd)",
 +                    max(comm->cutoff,comm->cutoff_mbody));
 +            fprintf(fplog,"%40s  %-7s %6.3f nm\n",
 +                    "multi-body bonded interactions","(-rdd)",
 +                    (comm->bBondComm || dd->bGridJump) ? comm->cutoff_mbody : min(comm->cutoff,limit));
 +        }
 +        if (dd->vsite_comm)
 +        {
 +            fprintf(fplog,"%40s  %-7s %6.3f nm\n",
 +                    "virtual site constructions","(-rcon)",limit);
 +        }
 +        if (dd->constraint_comm)
 +        {
 +            sprintf(buf,"atoms separated by up to %d constraints",
 +                    1+ir->nProjOrder);
 +            fprintf(fplog,"%40s  %-7s %6.3f nm\n",
 +                    buf,"(-rcon)",limit);
 +        }
 +        fprintf(fplog,"\n");
 +    }
 +    
 +    fflush(fplog);
 +}
 +
 +/* For dynamic load balancing, determine the maximum number of
 + * communication pulses per DD dimension (cd[d].np_dlb), allocate the
 + * pulse index buffers, and derive the minimum DLB cell size per
 + * dimension (cellsize_min_dlb). Also raises cellsize_limit where the
 + * pulse count requires it.
 + */
 +static void set_cell_limits_dlb(gmx_domdec_t *dd,
 +                                real dlb_scale,
 +                                const t_inputrec *ir,
 +                                const gmx_ddbox_t *ddbox)
 +{
 +    gmx_domdec_comm_t *comm;
 +    int  d,dim,npulse,npulse_d_max,npulse_d;
 +    gmx_bool bNoCutOff;
 +
 +    comm = dd->comm;
 +
 +    bNoCutOff = (ir->rvdw == 0 || ir->rcoulomb == 0);
 +
 +    /* Determine the maximum number of comm. pulses in one dimension */
 +        
 +    comm->cellsize_limit = max(comm->cellsize_limit,comm->cutoff_mbody);
 +        
 +    /* Determine the maximum required number of grid pulses */
 +    if (comm->cellsize_limit >= comm->cutoff)
 +    {
 +        /* Only a single pulse is required */
 +        npulse = 1;
 +    }
 +    else if (!bNoCutOff && comm->cellsize_limit > 0)
 +    {
 +        /* We round down slightly here to avoid overhead due to the latency
 +         * of extra communication calls when the cut-off
 +         * would be only slightly longer than the cell size.
 +         * Later cellsize_limit is redetermined,
 +         * so we can not miss interactions due to this rounding.
 +         */
 +        npulse = (int)(0.96 + comm->cutoff/comm->cellsize_limit);
 +    }
 +    else
 +    {
 +        /* There is no cell size limit */
 +        npulse = max(dd->nc[XX]-1,max(dd->nc[YY]-1,dd->nc[ZZ]-1));
 +    }
 +
 +    if (!bNoCutOff && npulse > 1)
 +    {
 +        /* See if we can do with less pulses, based on dlb_scale */
 +        npulse_d_max = 0;
 +        for(d=0; d<dd->ndim; d++)
 +        {
 +            dim = dd->dim[d];
 +            npulse_d = (int)(1 + dd->nc[dim]*comm->cutoff
 +                             /(ddbox->box_size[dim]*ddbox->skew_fac[dim]*dlb_scale));
 +            npulse_d_max = max(npulse_d_max,npulse_d);
 +        }
 +        npulse = min(npulse,npulse_d_max);
 +    }
 +
 +    /* This env var can override npulse */
 +    d = dd_nst_env(debug,"GMX_DD_NPULSE",0);
 +    if (d > 0)
 +    {
 +        npulse = d;
 +    }
 +
 +    /* Clamp the pulse count per dimension to nc-1 and allocate buffers */
 +    comm->maxpulse = 1;
 +    comm->bVacDLBNoLimit = (ir->ePBC == epbcNONE);
 +    for(d=0; d<dd->ndim; d++)
 +    {
 +        comm->cd[d].np_dlb = min(npulse,dd->nc[dd->dim[d]]-1);
 +        comm->cd[d].np_nalloc = comm->cd[d].np_dlb;
 +        snew(comm->cd[d].ind,comm->cd[d].np_nalloc);
 +        comm->maxpulse = max(comm->maxpulse,comm->cd[d].np_dlb);
 +        if (comm->cd[d].np_dlb < dd->nc[dd->dim[d]]-1)
 +        {
 +            comm->bVacDLBNoLimit = FALSE;
 +        }
 +    }
 +
 +    /* cellsize_limit is set for LINCS in init_domain_decomposition */
 +    if (!comm->bVacDLBNoLimit)
 +    {
 +        comm->cellsize_limit = max(comm->cellsize_limit,
 +                                   comm->cutoff/comm->maxpulse);
 +    }
 +    /* Re-applied here since the branch above may have raised the limit */
 +    comm->cellsize_limit = max(comm->cellsize_limit,comm->cutoff_mbody);
 +    /* Set the minimum cell size for each DD dimension */
 +    for(d=0; d<dd->ndim; d++)
 +    {
 +        if (comm->bVacDLBNoLimit ||
 +            comm->cd[d].np_dlb*comm->cellsize_limit >= comm->cutoff)
 +        {
 +            comm->cellsize_min_dlb[dd->dim[d]] = comm->cellsize_limit;
 +        }
 +        else
 +        {
 +            comm->cellsize_min_dlb[dd->dim[d]] =
 +                comm->cutoff/comm->cd[d].np_dlb;
 +        }
 +    }
 +    if (comm->cutoff_mbody <= 0)
 +    {
 +        comm->cutoff_mbody = min(comm->cutoff,comm->cellsize_limit);
 +    }
 +    if (comm->bDynLoadBal)
 +    {
 +        set_dlb_limits(dd);
 +    }
 +}
 +
 +gmx_bool dd_bonded_molpbc(gmx_domdec_t *dd,int ePBC)
 +{
 +    /* If each molecule is a single charge group
 +     * or we use domain decomposition for each periodic dimension,
 +     * we do not need to take pbc into account for the bonded interactions.
 +     */
 +    return (ePBC != epbcNONE && dd->comm->bInterCGBondeds &&
 +            !(dd->nc[XX]>1 &&
 +              dd->nc[YY]>1 &&
 +              (dd->nc[ZZ]>1 || ePBC==epbcXY)));
 +}
 +
 +/* Finish setting up DD parameters that cannot be determined in
 + * init_domain_decomposition: per-thread work buffers, PME DD data,
 + * DLB cell limits, the settings summary in the log, and the
 + * global-to-local atom lookup table.
 + */
 +void set_dd_parameters(FILE *fplog,gmx_domdec_t *dd,real dlb_scale,
 +                       t_inputrec *ir,t_forcerec *fr,
 +                       gmx_ddbox_t *ddbox)
 +{
 +    gmx_domdec_comm_t *comm;
 +    int  natoms_tot;
 +    real vol_frac;
 +
 +    comm = dd->comm;
 +
 +    /* Initialize the thread data.
 +     * This can not be done in init_domain_decomposition,
 +     * as the numbers of threads is determined later.
 +     */
 +    comm->nth = gmx_omp_nthreads_get(emntDomdec);
 +    if (comm->nth > 1)
 +    {
 +        snew(comm->dth,comm->nth);
 +    }
 +
 +    if (EEL_PME(ir->coulombtype))
 +    {
 +        init_ddpme(dd,&comm->ddpme[0],0);
 +        if (comm->npmedecompdim >= 2)
 +        {
 +            init_ddpme(dd,&comm->ddpme[1],1);
 +        }
 +    }
 +    else
 +    {
 +        comm->npmenodes = 0;
 +        if (dd->pme_nodeid >= 0)
 +        {
 +            gmx_fatal_collective(FARGS,NULL,dd,
 +                                 "Can not have separate PME nodes without PME electrostatics");
 +        }
 +    }
 +        
 +    if (debug)
 +    {
 +        fprintf(debug,"The DD cut-off is %f\n",comm->cutoff);
 +    }
 +    if (comm->eDLB != edlbNO)
 +    {
 +        set_cell_limits_dlb(dd,dlb_scale,ir,ddbox);
 +    }
 +    
 +    print_dd_settings(fplog,dd,ir,comm->bDynLoadBal,dlb_scale,ddbox);
 +    /* For eDLB==auto, also show what the settings would become with DLB on */
 +    if (comm->eDLB == edlbAUTO)
 +    {
 +        if (fplog)
 +        {
 +            fprintf(fplog,"When dynamic load balancing gets turned on, these settings will change to:\n");
 +        }
 +        print_dd_settings(fplog,dd,ir,TRUE,dlb_scale,ddbox);
 +    }
 +
 +    /* Estimate the fraction of the system volume covered by one node's
 +     * DD zones; presumably used to size the ga2la table below — the
 +     * exact semantics of ga2la_init's second argument are defined
 +     * elsewhere (TODO confirm against ga2la_init).
 +     */
 +    if (ir->ePBC == epbcNONE)
 +    {
 +        vol_frac = 1 - 1/(double)dd->nnodes;
 +    }
 +    else
 +    {
 +        vol_frac =
 +            (1 + comm_box_frac(dd->nc,comm->cutoff,ddbox))/(double)dd->nnodes;
 +    }
 +    if (debug)
 +    {
 +        fprintf(debug,"Volume fraction for all DD zones: %f\n",vol_frac);
 +    }
 +    natoms_tot = comm->cgs_gl.index[comm->cgs_gl.nr];
 +   
 +    dd->ga2la = ga2la_init(natoms_tot,vol_frac*natoms_tot);
 +}
 +
 +/* Try to change the DD communication cut-off to cutoff_req.
 + * Returns FALSE when the request does not fit the current
 + * decomposition (would need more pulses than allocated, or some cells
 + * are locally too small); on success stores cutoff_req in
 + * dd->comm->cutoff and returns TRUE.
 + * Collective: with DLB all ranks must call this (gmx_sumi below).
 + */
 +gmx_bool change_dd_cutoff(t_commrec *cr,t_state *state,t_inputrec *ir,
 +                          real cutoff_req)
 +{
 +    gmx_domdec_t *dd;
 +    gmx_ddbox_t ddbox;
 +    int d,dim,np;
 +    real inv_cell_size;
 +    int LocallyLimited;
 +
 +    dd = cr->dd;
 +
 +    set_ddbox(dd,FALSE,cr,ir,state->box,
 +              TRUE,&dd->comm->cgs_gl,state->x,&ddbox);
 +
 +    LocallyLimited = 0;
 +
 +    for(d=0; d<dd->ndim; d++)
 +    {
 +        dim = dd->dim[d];
 +
 +        inv_cell_size = DD_CELL_MARGIN*dd->nc[dim]/ddbox.box_size[dim];
 +        if (dynamic_dd_box(&ddbox,ir))
 +        {
 +            inv_cell_size *= DD_PRES_SCALE_MARGIN;
 +        }
 +
 +        /* Number of pulses the requested cut-off would need in dim */
 +        np = 1 + (int)(cutoff_req*inv_cell_size*ddbox.skew_fac[dim]);
 +
 +        if (dd->comm->eDLB != edlbNO && dim < ddbox.npbcdim &&
 +            dd->comm->cd[d].np_dlb > 0)
 +        {
 +            if (np > dd->comm->cd[d].np_dlb)
 +            {
 +                return FALSE;
 +            }
 +
 +            /* If a current local cell size is smaller than the requested
 +             * cut-off, we could still fix it, but this gets very complicated.
 +             * Without fixing here, we might actually need more checks.
 +             */
 +            if ((dd->comm->cell_x1[dim] - dd->comm->cell_x0[dim])*ddbox.skew_fac[dim]*dd->comm->cd[d].np_dlb < cutoff_req)
 +            {
 +                LocallyLimited = 1;
 +            }
 +        }
 +    }
 +
 +    if (dd->comm->eDLB != edlbNO)
 +    {
 +        /* If DLB is not active yet, we don't need to check the grid jumps.
 +         * Actually we shouldn't, because then the grid jump data is not set.
 +         */
 +        if (dd->comm->bDynLoadBal &&
 +            check_grid_jump(0,dd,cutoff_req,&ddbox,FALSE))
 +        {
 +            LocallyLimited = 1; 
 +        }
 +
 +        /* Global reduction: FALSE if any rank is locally limited */
 +        gmx_sumi(1,&LocallyLimited,cr);
 +
 +        if (LocallyLimited > 0)
 +        {
 +            return FALSE;
 +        }
 +    }
 +
 +    dd->comm->cutoff = cutoff_req;
 +
 +    return TRUE;
 +}
 +
 +/* Merge the charge groups received in communication pulse "pulse" into
 + * the per-zone arrays. First shifts the charge groups already stored by
 + * earlier pulses to make room (also correcting the previously stored
 + * send indices for the shift), then appends the received global
 + * indices, centers-of-mass and cg info, updating ncg_cell and cgindex.
 + */
 +static void merge_cg_buffers(int ncell,
 +                             gmx_domdec_comm_dim_t *cd, int pulse,
 +                             int  *ncg_cell,
 +                             int  *index_gl, int  *recv_i,
 +                             rvec *cg_cm,    rvec *recv_vr,
 +                             int *cgindex,
 +                             cginfo_mb_t *cginfo_mb,int *cginfo)
 +{
 +    gmx_domdec_ind_t *ind,*ind_p;
 +    int p,cell,c,cg,cg0,cg1,cg_gl,nat;
 +    int shift,shift_at;
 +    
 +    ind = &cd->ind[pulse];
 +    
 +    /* First correct the already stored data */
 +    shift = ind->nrecv[ncell];
 +    for(cell=ncell-1; cell>=0; cell--)
 +    {
 +        shift -= ind->nrecv[cell];
 +        if (shift > 0)
 +        {
 +            /* Move the cg's present from previous grid pulses */
 +            cg0 = ncg_cell[ncell+cell];
 +            cg1 = ncg_cell[ncell+cell+1];
 +            cgindex[cg1+shift] = cgindex[cg1];
 +            /* Copy backwards so overlapping source/dest stay correct */
 +            for(cg=cg1-1; cg>=cg0; cg--)
 +            {
 +                index_gl[cg+shift] = index_gl[cg];
 +                copy_rvec(cg_cm[cg],cg_cm[cg+shift]);
 +                cgindex[cg+shift] = cgindex[cg];
 +                cginfo[cg+shift] = cginfo[cg];
 +            }
 +            /* Correct the already stored send indices for the shift */
 +            for(p=1; p<=pulse; p++)
 +            {
 +                ind_p = &cd->ind[p];
 +                cg0 = 0;
 +                for(c=0; c<cell; c++)
 +                {
 +                    cg0 += ind_p->nsend[c];
 +                }
 +                cg1 = cg0 + ind_p->nsend[cell];
 +                for(cg=cg0; cg<cg1; cg++)
 +                {
 +                    ind_p->index[cg] += shift;
 +                }
 +            }
 +        }
 +    }
 +
 +    /* Merge in the communicated buffers */
 +    shift = 0;
 +    shift_at = 0;
 +    cg0 = 0;
 +    for(cell=0; cell<ncell; cell++)
 +    {
 +        cg1 = ncg_cell[ncell+cell+1] + shift;
 +        if (shift_at > 0)
 +        {
 +            /* Correct the old cg indices */
 +            for(cg=ncg_cell[ncell+cell]; cg<cg1; cg++)
 +            {
 +                cgindex[cg+1] += shift_at;
 +            }
 +        }
 +        for(cg=0; cg<ind->nrecv[cell]; cg++)
 +        {
 +            /* Copy this charge group from the buffer */
 +            index_gl[cg1] = recv_i[cg0];
 +            copy_rvec(recv_vr[cg0],cg_cm[cg1]);
 +            /* Add it to the cgindex */
 +            cg_gl = index_gl[cg1];
 +            cginfo[cg1] = ddcginfo(cginfo_mb,cg_gl);
 +            nat = GET_CGINFO_NATOMS(cginfo[cg1]);
 +            cgindex[cg1+1] = cgindex[cg1] + nat;
 +            cg0++;
 +            cg1++;
 +            shift_at += nat;
 +        }
 +        shift += ind->nrecv[cell];
 +        ncg_cell[ncell+cell+1] = cg1;
 +    }
 +}
 +
 +static void make_cell2at_index(gmx_domdec_comm_dim_t *cd,
 +                               int nzone,int cg0,const int *cgindex)
 +{
 +    int cg,zone,p;
 +    
 +    /* Store the atom block boundaries for easy copying of communication buffers
 +     */
 +    cg = cg0;
 +    for(zone=0; zone<nzone; zone++)
 +    {
 +        for(p=0; p<cd->np; p++) {
 +            cd->ind[p].cell2at0[zone] = cgindex[cg];
 +            cg += cd->ind[p].nrecv[zone];
 +            cd->ind[p].cell2at1[zone] = cgindex[cg];
 +        }
 +    }
 +}
 +
 +static gmx_bool missing_link(t_blocka *link,int cg_gl,char *bLocalCG)
 +{
 +    int  i;
 +    gmx_bool bMiss;
 +
 +    bMiss = FALSE;
 +    for(i=link->index[cg_gl]; i<link->index[cg_gl+1]; i++)
 +    {
 +        if (!bLocalCG[link->a[i]])
 +        {
 +            bMiss = TRUE;
 +        }
 +    }
 +
 +    return bMiss;
 +}
 +
 +/* Domain corners for communication, a maximum of 4 i-zones see a j domain */
 +typedef struct {
 +    real c[DIM][4]; /* the corners for the non-bonded communication */
 +    real cr0;       /* corner for rounding */
 +    real cr1[4];    /* corners for rounding */
 +    real bc[DIM];   /* corners for bounded communication */
 +    real bcr1;      /* corner for rounding for bonded communication */
 +} dd_corners_t;    /* filled by set_dd_corners, read by get_zone_pulse_cgs */
 +
 +/* Determine the corners of the domain(s) we are communicating with */
 +static void
 +set_dd_corners(const gmx_domdec_t *dd,
 +               int dim0, int dim1, int dim2,
 +               gmx_bool bDistMB,
 +               dd_corners_t *c)
 +{
 +    const gmx_domdec_comm_t *comm;
 +    const gmx_domdec_zones_t *zones;
 +    int i,j;
 +
 +    comm = dd->comm;
 +
 +    zones = &comm->zones;
 +
 +    /* Keep the compiler happy */
 +    c->cr0  = 0;
 +    c->bcr1 = 0;
 +
 +    /* The first dimension is equal for all cells */
 +    c->c[0][0] = comm->cell_x0[dim0];
 +    if (bDistMB)
 +    {
 +        c->bc[0] = c->c[0][0];
 +    }
 +    if (dd->ndim >= 2)
 +    {
 +        /* NOTE(review): the dim1/dim2 parameters are overwritten here
 +         * with dd->dim[1]/dd->dim[2]; presumably callers pass the same
 +         * values — confirm against the call site.
 +         */
 +        dim1 = dd->dim[1];
 +        /* This cell row is only seen from the first row */
 +        c->c[1][0] = comm->cell_x0[dim1];
 +        /* All rows can see this row */
 +        c->c[1][1] = comm->cell_x0[dim1];
 +        if (dd->bGridJump)
 +        {
 +            c->c[1][1] = max(comm->cell_x0[dim1],comm->zone_d1[1].mch0);
 +            if (bDistMB)
 +            {
 +                /* For the multi-body distance we need the maximum */
 +                c->bc[1] = max(comm->cell_x0[dim1],comm->zone_d1[1].p1_0);
 +            }
 +        }
 +        /* Set the upper-right corner for rounding */
 +        c->cr0 = comm->cell_x1[dim0];
 +        
 +        if (dd->ndim >= 3)
 +        {
 +            dim2 = dd->dim[2];
 +            for(j=0; j<4; j++)
 +            {
 +                c->c[2][j] = comm->cell_x0[dim2];
 +            }
 +            if (dd->bGridJump)
 +            {
 +                /* Use the maximum of the i-cells that see a j-cell */
 +                for(i=0; i<zones->nizone; i++)
 +                {
 +                    for(j=zones->izone[i].j0; j<zones->izone[i].j1; j++)
 +                    {
 +                        if (j >= 4)
 +                        {
 +                            c->c[2][j-4] =
 +                                max(c->c[2][j-4],
 +                                    comm->zone_d2[zones->shift[i][dim0]][zones->shift[i][dim1]].mch0);
 +                        }
 +                    }
 +                }
 +                if (bDistMB)
 +                {
 +                    /* For the multi-body distance we need the maximum */
 +                    c->bc[2] = comm->cell_x0[dim2];
 +                    for(i=0; i<2; i++)
 +                    {
 +                        for(j=0; j<2; j++)
 +                        {
 +                            c->bc[2] = max(c->bc[2],comm->zone_d2[i][j].p1_0);
 +                        }
 +                    }
 +                }
 +            }
 +            
 +            /* Set the upper-right corner for rounding */
 +            /* Cell (0,0,0) and cell (1,0,0) can see cell 4 (0,1,1)
 +             * Only cell (0,0,0) can see cell 7 (1,1,1)
 +             */
 +            c->cr1[0] = comm->cell_x1[dim1];
 +            c->cr1[3] = comm->cell_x1[dim1];
 +            if (dd->bGridJump)
 +            {
 +                c->cr1[0] = max(comm->cell_x1[dim1],comm->zone_d1[1].mch1);
 +                if (bDistMB)
 +                {
 +                    /* For the multi-body distance we need the maximum */
 +                    c->bcr1 = max(comm->cell_x1[dim1],comm->zone_d1[1].p1_1);
 +                }
 +            }
 +        }
 +    }
 +}
 +
 +/* Determine which cg's we need to send in this pulse from this zone */
 +static void
 +get_zone_pulse_cgs(gmx_domdec_t *dd,
 +                   int zonei, int zone,
 +                   int cg0, int cg1,
 +                   const int *index_gl,
 +                   const int *cgindex,
 +                   int dim, int dim_ind,
 +                   int dim0, int dim1, int dim2,
 +                   real r_comm2, real r_bcomm2,
 +                   matrix box,
 +                   ivec tric_dist,
 +                   rvec *normal,
 +                   real skew_fac2_d, real skew_fac_01,
 +                   rvec *v_d, rvec *v_0, rvec *v_1,
 +                   const dd_corners_t *c,
 +                   rvec sf2_round,
 +                   gmx_bool bDistBonded,
 +                   gmx_bool bBondComm,
 +                   gmx_bool bDist2B,
 +                   gmx_bool bDistMB,
 +                   rvec *cg_cm,
 +                   int *cginfo,
 +                   gmx_domdec_ind_t *ind,
 +                   int **ibuf, int *ibuf_nalloc,
 +                   vec_rvec_t *vbuf,
 +                   int *nsend_ptr,
 +                   int *nat_ptr,
 +                   int *nsend_z_ptr)
 +{
 +    gmx_domdec_comm_t *comm;
 +    gmx_bool bScrew;
 +    gmx_bool bDistMB_pulse;
 +    int  cg,i;
 +    real r2,rb2,r,tric_sh;
 +    rvec rn,rb;
 +    int  dimd;
 +    int  nsend_z,nsend,nat;
 +
 +    comm = dd->comm;
 +
 +    bScrew = (dd->bScrewPBC && dim == XX);
 +
 +    bDistMB_pulse = (bDistMB && bDistBonded);
 +
 +    nsend_z = 0;
 +    nsend   = *nsend_ptr;
 +    nat     = *nat_ptr;
 +
 +    for(cg=cg0; cg<cg1; cg++)
 +    {
 +        r2  = 0;
 +        rb2 = 0;
 +        if (tric_dist[dim_ind] == 0)
 +        {
 +            /* Rectangular direction, easy */
 +            r = cg_cm[cg][dim] - c->c[dim_ind][zone];
 +            if (r > 0)
 +            {
 +                r2 += r*r;
 +            }
 +            if (bDistMB_pulse)
 +            {
 +                r = cg_cm[cg][dim] - c->bc[dim_ind];
 +                if (r > 0)
 +                {
 +                    rb2 += r*r;
 +                }
 +            }
 +            /* Rounding gives at most a 16% reduction
 +             * in communicated atoms
 +             */
 +            if (dim_ind >= 1 && (zonei == 1 || zonei == 2))
 +            {
 +                r = cg_cm[cg][dim0] - c->cr0;
 +                /* This is the first dimension, so always r >= 0 */
 +                r2 += r*r;
 +                if (bDistMB_pulse)
 +                {
 +                    rb2 += r*r;
 +                }
 +            }
 +            if (dim_ind == 2 && (zonei == 2 || zonei == 3))
 +            {
 +                r = cg_cm[cg][dim1] - c->cr1[zone];
 +                if (r > 0)
 +                {
 +                    r2 += r*r;
 +                }
 +                if (bDistMB_pulse)
 +                {
 +                    r = cg_cm[cg][dim1] - c->bcr1;
 +                    if (r > 0)
 +                    {
 +                        rb2 += r*r;
 +                    }
 +                }
 +            }
 +        }
 +        else
 +        {
 +            /* Triclinic direction, more complicated */
 +            clear_rvec(rn);
 +            clear_rvec(rb);
 +            /* Rounding, conservative as the skew_fac multiplication
 +             * will slightly underestimate the distance.
 +             */
 +            if (dim_ind >= 1 && (zonei == 1 || zonei == 2))
 +            {
 +                rn[dim0] = cg_cm[cg][dim0] - c->cr0;
 +                for(i=dim0+1; i<DIM; i++)
 +                {
 +                    rn[dim0] -= cg_cm[cg][i]*v_0[i][dim0];
 +                }
 +                r2 = rn[dim0]*rn[dim0]*sf2_round[dim0];
 +                if (bDistMB_pulse)
 +                {
 +                    rb[dim0] = rn[dim0];
 +                    rb2 = r2;
 +                }
 +                /* Take care that the cell planes along dim0 might not
 +                 * be orthogonal to those along dim1 and dim2.
 +                 */
 +                for(i=1; i<=dim_ind; i++)
 +                {
 +                    dimd = dd->dim[i];
 +                    if (normal[dim0][dimd] > 0)
 +                    {
 +                        rn[dimd] -= rn[dim0]*normal[dim0][dimd];
 +                        if (bDistMB_pulse)
 +                        {
 +                            rb[dimd] -= rb[dim0]*normal[dim0][dimd];
 +                        }
 +                    }
 +                }
 +            }
 +            if (dim_ind == 2 && (zonei == 2 || zonei == 3))
 +            {
 +                rn[dim1] += cg_cm[cg][dim1] - c->cr1[zone];
 +                tric_sh = 0;
 +                for(i=dim1+1; i<DIM; i++)
 +                {
 +                    tric_sh -= cg_cm[cg][i]*v_1[i][dim1];
 +                }
 +                rn[dim1] += tric_sh;
 +                if (rn[dim1] > 0)
 +                {
 +                    r2 += rn[dim1]*rn[dim1]*sf2_round[dim1];
 +                    /* Take care of coupling of the distances
 +                     * to the planes along dim0 and dim1 through dim2.
 +                     */
 +                    r2 -= rn[dim0]*rn[dim1]*skew_fac_01;
 +                    /* Take care that the cell planes along dim1
 +                     * might not be orthogonal to that along dim2.
 +                     */
 +                    if (normal[dim1][dim2] > 0)
 +                    {
 +                        rn[dim2] -= rn[dim1]*normal[dim1][dim2];
 +                    }
 +                }
 +                if (bDistMB_pulse)
 +                {
 +                    rb[dim1] +=
 +                        cg_cm[cg][dim1] - c->bcr1 + tric_sh;
 +                    if (rb[dim1] > 0)
 +                    {
 +                        rb2 += rb[dim1]*rb[dim1]*sf2_round[dim1];
 +                        /* Take care of coupling of the distances
 +                         * to the planes along dim0 and dim1 through dim2.
 +                         */
 +                        rb2 -= rb[dim0]*rb[dim1]*skew_fac_01;
 +                        /* Take care that the cell planes along dim1
 +                         * might not be orthogonal to that along dim2.
 +                         */
 +                        if (normal[dim1][dim2] > 0)
 +                        {
 +                            rb[dim2] -= rb[dim1]*normal[dim1][dim2];
 +                        }
 +                    }
 +                }
 +            }
 +            /* The distance along the communication direction */
 +            rn[dim] += cg_cm[cg][dim] - c->c[dim_ind][zone];
 +            tric_sh = 0;
 +            for(i=dim+1; i<DIM; i++)
 +            {
 +                tric_sh -= cg_cm[cg][i]*v_d[i][dim];
 +            }
 +            rn[dim] += tric_sh;
 +            if (rn[dim] > 0)
 +            {
 +                r2 += rn[dim]*rn[dim]*skew_fac2_d;
 +                /* Take care of coupling of the distances
 +                 * to the planes along dim0 and dim1 through dim2.
 +                 */
 +                if (dim_ind == 1 && zonei == 1)
 +                {
 +                    r2 -= rn[dim0]*rn[dim]*skew_fac_01;
 +                }
 +            }
 +            if (bDistMB_pulse)
 +            {
 +                clear_rvec(rb);
 +                rb[dim] += cg_cm[cg][dim] - c->bc[dim_ind] + tric_sh;
 +                if (rb[dim] > 0)
 +                {
 +                    rb2 += rb[dim]*rb[dim]*skew_fac2_d;
 +                    /* Take care of coupling of the distances
 +                     * to the planes along dim0 and dim1 through dim2.
 +                     */
 +                    if (dim_ind == 1 && zonei == 1)
 +                    {
 +                        rb2 -= rb[dim0]*rb[dim]*skew_fac_01;
 +                    }
 +                }
 +            }
 +        }
 +        
 +        if (r2 < r_comm2 ||
 +            (bDistBonded &&
 +             ((bDistMB && rb2 < r_bcomm2) ||
 +              (bDist2B && r2  < r_bcomm2)) &&
 +             (!bBondComm ||
 +              (GET_CGINFO_BOND_INTER(cginfo[cg]) &&
 +               missing_link(comm->cglink,index_gl[cg],
 +                            comm->bLocalCG)))))
 +        {
 +            /* Make an index to the local charge groups */
 +            if (nsend+1 > ind->nalloc)
 +            {
 +                ind->nalloc = over_alloc_large(nsend+1);
 +                srenew(ind->index,ind->nalloc);
 +            }
 +            if (nsend+1 > *ibuf_nalloc)
 +            {
 +                *ibuf_nalloc = over_alloc_large(nsend+1);
 +                srenew(*ibuf,*ibuf_nalloc);
 +            }
 +            ind->index[nsend] = cg;
 +            (*ibuf)[nsend] = index_gl[cg];
 +            nsend_z++;
 +            vec_rvec_check_alloc(vbuf,nsend+1);
 +            
 +            if (dd->ci[dim] == 0)
 +            {
 +                /* Correct cg_cm for pbc */
 +                rvec_add(cg_cm[cg],box[dim],vbuf->v[nsend]);
 +                if (bScrew)
 +                {
 +                    vbuf->v[nsend][YY] = box[YY][YY] - vbuf->v[nsend][YY];
 +                    vbuf->v[nsend][ZZ] = box[ZZ][ZZ] - vbuf->v[nsend][ZZ];
 +                }
 +            }
 +            else
 +            {
 +                copy_rvec(cg_cm[cg],vbuf->v[nsend]);
 +            }
 +            nsend++;
 +            nat += cgindex[cg+1] - cgindex[cg];
 +        }
 +    }
 +
 +    *nsend_ptr   = nsend;
 +    *nat_ptr     = nat;
 +    *nsend_z_ptr = nsend_z;
 +}
 +
  +/* Set up the charge group/atom halo communication for all
  + * decomposition dimensions and pulses: select which home (and
  + * previously received) charge groups must be sent to neighboring
  + * domains, exchange the counts, global indices and positions, and
  + * update the zone charge group ranges and cginfo accordingly.
  + */
  +static void setup_dd_communication(gmx_domdec_t *dd,
  +                                   matrix box,gmx_ddbox_t *ddbox,
  +                                   t_forcerec *fr,t_state *state,rvec **f)
  +{
  +    int dim_ind,dim,dim0,dim1,dim2,dimd,p,nat_tot;
  +    int nzone,nzone_send,zone,zonei,cg0,cg1;
  +    int c,i,j,cg,cg_gl,nrcg;
  +    int *zone_cg_range,pos_cg,*index_gl,*cgindex,*recv_i;
  +    gmx_domdec_comm_t *comm;
  +    gmx_domdec_zones_t *zones;
  +    gmx_domdec_comm_dim_t *cd;
  +    gmx_domdec_ind_t *ind;
  +    cginfo_mb_t *cginfo_mb;
  +    gmx_bool bBondComm,bDist2B,bDistMB,bDistBonded;
  +    real r_mb,r_comm2,r_scomm2,r_bcomm2,r_0,r_1,r2inc,inv_ncg;
  +    dd_corners_t corners;
  +    ivec tric_dist;
  +    rvec *cg_cm,*normal,*v_d,*v_0=NULL,*v_1=NULL,*recv_vr;
  +    real skew_fac2_d,skew_fac_01;
  +    rvec sf2_round;
  +    int  nsend,nat;
  +    int  th;
  +    
  +    if (debug)
  +    {
  +        fprintf(debug,"Setting up DD communication\n");
  +    }
  +    
  +    comm  = dd->comm;
  +
  +    /* Select the position array to communicate: charge group centers
  +     * with the group cut-off scheme, atom coordinates with Verlet.
  +     */
  +    switch (fr->cutoff_scheme)
  +    {
  +    case ecutsGROUP:
  +        cg_cm = fr->cg_cm;
  +        break;
  +    case ecutsVERLET:
  +        cg_cm = state->x;
  +        break;
  +    default:
  +        gmx_incons("unimplemented");
  +        cg_cm = NULL; /* gmx_incons should not return; silences warnings */
  +    }
  +
  +    /* Flag each DD dimension that needs triclinic distance checks:
  +     * once any dimension up to dim_ind is triclinic, all further
  +     * dimensions use the triclinic code path.
  +     */
  +    for(dim_ind=0; dim_ind<dd->ndim; dim_ind++)
  +    {
  +        dim = dd->dim[dim_ind];
  +
  +        /* Check if we need to use triclinic distances */
  +        tric_dist[dim_ind] = 0;
  +        for(i=0; i<=dim_ind; i++)
  +        {
  +            if (ddbox->tric_dir[dd->dim[i]])
  +            {
  +                tric_dist[dim_ind] = 1;
  +            }
  +        }
  +    }
  +
  +    bBondComm = comm->bBondComm;
  +
  +    /* Do we need to determine extra distances for multi-body bondeds? */
  +    bDistMB = (comm->bInterCGMultiBody && dd->bGridJump && dd->ndim > 1);
  +    
  +    /* Do we need to determine extra distances for only two-body bondeds? */
  +    bDist2B = (bBondComm && !bDistMB);
  +
  +    /* Squared cut-offs for non-bonded and (multi-body) bonded comm. */
  +    r_comm2  = sqr(comm->cutoff);
  +    r_bcomm2 = sqr(comm->cutoff_mbody);
  +
  +    if (debug)
  +    {
  +        fprintf(debug,"bBondComm %d, r_bc %f\n",bBondComm,sqrt(r_bcomm2));
  +    }
  +
  +    zones = &comm->zones;
  +    
  +    dim0 = dd->dim[0];
  +    dim1 = (dd->ndim >= 2 ? dd->dim[1] : -1);
  +    dim2 = (dd->ndim >= 3 ? dd->dim[2] : -1);
  +
  +    set_dd_corners(dd,dim0,dim1,dim2,bDistMB,&corners);
  +    
  +    /* Triclinic stuff */
  +    normal = ddbox->normal;
  +    skew_fac_01 = 0;
  +    if (dd->ndim >= 2)
  +    {
  +        v_0 = ddbox->v[dim0];
  +        if (ddbox->tric_dir[dim0] && ddbox->tric_dir[dim1])
  +        {
  +            /* Determine the coupling coefficient for the distances
  +             * to the cell planes along dim0 and dim1 through dim2.
  +             * This is required for correct rounding.
  +             */
  +            skew_fac_01 =
  +                ddbox->v[dim0][dim1+1][dim0]*ddbox->v[dim1][dim1+1][dim1];
  +            if (debug)
  +            {
  +                fprintf(debug,"\nskew_fac_01 %f\n",skew_fac_01);
  +            }
  +        }
  +    }
  +    if (dd->ndim >= 3)
  +    {
  +        v_1 = ddbox->v[dim1];
  +    }
  +    
  +    zone_cg_range = zones->cg_range;
  +    index_gl = dd->index_gl;
  +    cgindex  = dd->cgindex;
  +    cginfo_mb = fr->cginfo_mb;
  +    
  +    /* Zone 0 is the home zone; communicated cg's are appended after it */
  +    zone_cg_range[0]   = 0;
  +    zone_cg_range[1]   = dd->ncg_home;
  +    comm->zone_ncg1[0] = dd->ncg_home;
  +    pos_cg             = dd->ncg_home;
  +    
  +    nat_tot = dd->nat_home;
  +    nzone = 1;
  +    for(dim_ind=0; dim_ind<dd->ndim; dim_ind++)
  +    {
  +        dim = dd->dim[dim_ind];
  +        cd = &comm->cd[dim_ind];
  +        
  +        if (dim >= ddbox->npbcdim && dd->ci[dim] == 0)
  +        {
  +            /* No pbc in this dimension, the first node should not comm. */
  +            nzone_send = 0;
  +        }
  +        else
  +        {
  +            nzone_send = nzone;
  +        }
  +
  +        v_d = ddbox->v[dim];
  +        skew_fac2_d = sqr(ddbox->skew_fac[dim]);
  +
  +        cd->bInPlace = TRUE;
  +        for(p=0; p<cd->np; p++)
  +        {
  +            /* Only atoms communicated in the first pulse are used
  +             * for multi-body bonded interactions or for bBondComm.
  +             */
  +            bDistBonded = ((bDistMB || bDist2B) && p == 0);
  +
  +            ind = &cd->ind[p];
  +            nsend = 0;
  +            nat = 0;
  +            for(zone=0; zone<nzone_send; zone++)
  +            {
  +                if (tric_dist[dim_ind] && dim_ind > 0)
  +                {
  +                    /* Determine slightly more optimized skew_fac's
  +                     * for rounding.
  +                     * This reduces the number of communicated atoms
  +                     * by about 10% for 3D DD of rhombic dodecahedra.
  +                     */
  +                    for(dimd=0; dimd<dim; dimd++)
  +                    {
  +                        sf2_round[dimd] = 1;
  +                        if (ddbox->tric_dir[dimd])
  +                        {
  +                            for(i=dd->dim[dimd]+1; i<DIM; i++)
  +                            {
  +                                /* If we are shifted in dimension i
  +                                 * and the cell plane is tilted forward
  +                                 * in dimension i, skip this coupling.
  +                                 */
  +                                if (!(zones->shift[nzone+zone][i] &&
  +                                      ddbox->v[dimd][i][dimd] >= 0))
  +                                {
  +                                    sf2_round[dimd] +=
  +                                        sqr(ddbox->v[dimd][i][dimd]);
  +                                }
  +                            }
  +                            sf2_round[dimd] = 1/sf2_round[dimd];
  +                        }
  +                    }
  +                }
  +
  +                zonei = zone_perm[dim_ind][zone];
  +                if (p == 0)
  +                {
  +                    /* Here we permutate the zones to obtain a convenient order
  +                     * for neighbor searching
  +                     */
  +                    cg0 = zone_cg_range[zonei];
  +                    cg1 = zone_cg_range[zonei+1];
  +                }
  +                else
  +                {
  +                    /* Look only at the cg's received in the previous grid pulse
  +                     */
  +                    cg1 = zone_cg_range[nzone+zone+1];
  +                    cg0 = cg1 - cd->ind[p-1].nrecv[zone];
  +                }
  +
  +                /* Each thread handles its own cg sub-range; thread 0
  +                 * writes straight into the communication buffers,
  +                 * the others fill per-thread temporaries that are
  +                 * appended below.
  +                 */
  +#pragma omp parallel for num_threads(comm->nth) schedule(static)
  +                for(th=0; th<comm->nth; th++)
  +                {
  +                    gmx_domdec_ind_t *ind_p;
  +                    int **ibuf_p,*ibuf_nalloc_p;
  +                    vec_rvec_t *vbuf_p;
  +                    int *nsend_p,*nat_p;
  +                    int *nsend_zone_p;
  +                    int cg0_th,cg1_th;
  +
  +                    if (th == 0)
  +                    {
  +                        /* Thread 0 writes in the comm buffers */
  +                        ind_p         = ind;
  +                        ibuf_p        = &comm->buf_int;
  +                        ibuf_nalloc_p = &comm->nalloc_int;
  +                        vbuf_p        = &comm->vbuf;
  +                        nsend_p       = &nsend;
  +                        nat_p         = &nat;
  +                        nsend_zone_p  = &ind->nsend[zone];
  +                    }
  +                    else
  +                    {
  +                        /* Other threads write into temp buffers */
  +                        ind_p         = &comm->dth[th].ind;
  +                        ibuf_p        = &comm->dth[th].ibuf;
  +                        ibuf_nalloc_p = &comm->dth[th].ibuf_nalloc;
  +                        vbuf_p        = &comm->dth[th].vbuf;
  +                        nsend_p       = &comm->dth[th].nsend;
  +                        nat_p         = &comm->dth[th].nat;
  +                        nsend_zone_p  = &comm->dth[th].nsend_zone;
  +
  +                        comm->dth[th].nsend      = 0;
  +                        comm->dth[th].nat        = 0;
  +                        comm->dth[th].nsend_zone = 0;
  +                    }
  +
  +                    if (comm->nth == 1)
  +                    {
  +                        cg0_th = cg0;
  +                        cg1_th = cg1;
  +                    }
  +                    else
  +                    {
  +                        /* Static split of the cg range over the threads */
  +                        cg0_th = cg0 + ((cg1 - cg0)* th   )/comm->nth;
  +                        cg1_th = cg0 + ((cg1 - cg0)*(th+1))/comm->nth;
  +                    }
  +                    
  +                    /* Get the cg's for this pulse in this zone */
  +                    get_zone_pulse_cgs(dd,zonei,zone,cg0_th,cg1_th,
  +                                       index_gl,cgindex,
  +                                       dim,dim_ind,dim0,dim1,dim2,
  +                                       r_comm2,r_bcomm2,
  +                                       box,tric_dist,
  +                                       normal,skew_fac2_d,skew_fac_01,
  +                                       v_d,v_0,v_1,&corners,sf2_round,
  +                                       bDistBonded,bBondComm,
  +                                       bDist2B,bDistMB,
  +                                       cg_cm,fr->cginfo,
  +                                       ind_p,
  +                                       ibuf_p,ibuf_nalloc_p,
  +                                       vbuf_p,
  +                                       nsend_p,nat_p,
  +                                       nsend_zone_p);
  +                }
  +
  +                /* Append data of threads>=1 to the communication buffers */
  +                for(th=1; th<comm->nth; th++)
  +                {
  +                    dd_comm_setup_work_t *dth;
  +                    int i,ns1;
  +
  +                    dth = &comm->dth[th];
  +
  +                    ns1 = nsend + dth->nsend_zone;
  +                    if (ns1 > ind->nalloc)
  +                    {
  +                        ind->nalloc = over_alloc_dd(ns1);
  +                        srenew(ind->index,ind->nalloc);
  +                    }
  +                    if (ns1 > comm->nalloc_int)
  +                    {
  +                        comm->nalloc_int = over_alloc_dd(ns1);
  +                        srenew(comm->buf_int,comm->nalloc_int);
  +                    }
  +                    if (ns1 > comm->vbuf.nalloc)
  +                    {
  +                        comm->vbuf.nalloc = over_alloc_dd(ns1);
  +                        srenew(comm->vbuf.v,comm->vbuf.nalloc);
  +                    }
  +
  +                    for(i=0; i<dth->nsend_zone; i++)
  +                    {
  +                        ind->index[nsend] = dth->ind.index[i];
  +                        comm->buf_int[nsend] = dth->ibuf[i];
  +                        copy_rvec(dth->vbuf.v[i],
  +                                  comm->vbuf.v[nsend]);
  +                        nsend++;
  +                    }
  +                    nat              += dth->nat;
  +                    ind->nsend[zone] += dth->nsend_zone;
  +                }
  +            }
  +            /* Clear the counts in case we do not have pbc */
  +            for(zone=nzone_send; zone<nzone; zone++)
  +            {
  +                ind->nsend[zone] = 0;
  +            }
  +            /* The last two entries carry the cg and atom totals */
  +            ind->nsend[nzone]   = nsend;
  +            ind->nsend[nzone+1] = nat;
  +            /* Communicate the number of cg's and atoms to receive */
  +            dd_sendrecv_int(dd, dim_ind, dddirBackward,
  +                            ind->nsend, nzone+2,
  +                            ind->nrecv, nzone+2);
  +            
  +            /* The rvec buffer is also required for atom buffers of size nsend
  +             * in dd_move_x and dd_move_f.
  +             */
  +            vec_rvec_check_alloc(&comm->vbuf,ind->nsend[nzone+1]);
  +
  +            if (p > 0)
  +            {
  +                /* We can receive in place if only the last zone is not empty */
  +                for(zone=0; zone<nzone-1; zone++)
  +                {
  +                    if (ind->nrecv[zone] > 0)
  +                    {
  +                        cd->bInPlace = FALSE;
  +                    }
  +                }
  +                if (!cd->bInPlace)
  +                {
  +                    /* The int buffer is only required here for the cg indices */
  +                    if (ind->nrecv[nzone] > comm->nalloc_int2)
  +                    {
  +                        comm->nalloc_int2 = over_alloc_dd(ind->nrecv[nzone]);
  +                        srenew(comm->buf_int2,comm->nalloc_int2);
  +                    }
  +                    /* The rvec buffer is also required for atom buffers
  +                     * of size nrecv in dd_move_x and dd_move_f.
  +                     */
  +                    i = max(cd->ind[0].nrecv[nzone+1],ind->nrecv[nzone+1]);
  +                    vec_rvec_check_alloc(&comm->vbuf2,i);
  +                }
  +            }
  +            
  +            /* Make space for the global cg indices */
  +            if (pos_cg + ind->nrecv[nzone] > dd->cg_nalloc
  +                || dd->cg_nalloc == 0)
  +            {
  +                dd->cg_nalloc = over_alloc_dd(pos_cg + ind->nrecv[nzone]);
  +                srenew(index_gl,dd->cg_nalloc);
  +                srenew(cgindex,dd->cg_nalloc+1);
  +            }
  +            /* Communicate the global cg indices */
  +            if (cd->bInPlace)
  +            {
  +                recv_i = index_gl + pos_cg;
  +            }
  +            else
  +            {
  +                recv_i = comm->buf_int2;
  +            }
  +            dd_sendrecv_int(dd, dim_ind, dddirBackward,
  +                            comm->buf_int, nsend,
  +                            recv_i,        ind->nrecv[nzone]);
  +
  +            /* Make space for cg_cm */
  +            dd_check_alloc_ncg(fr,state,f,pos_cg + ind->nrecv[nzone]);
  +            if (fr->cutoff_scheme == ecutsGROUP)
  +            {
  +                /* Reacquire the pointer: dd_check_alloc_ncg may reallocate */
  +                cg_cm = fr->cg_cm;
  +            }
  +            else
  +            {
  +                cg_cm = state->x;
  +            }
  +            /* Communicate cg_cm */
  +            if (cd->bInPlace)
  +            {
  +                recv_vr = cg_cm + pos_cg;
  +            }
  +            else
  +            {
  +                recv_vr = comm->vbuf2.v;
  +            }
  +            dd_sendrecv_rvec(dd, dim_ind, dddirBackward,
  +                             comm->vbuf.v, nsend,
  +                             recv_vr,      ind->nrecv[nzone]);
  +            
  +            /* Make the charge group index */
  +            if (cd->bInPlace)
  +            {
  +                zone = (p == 0 ? 0 : nzone - 1);
  +                while (zone < nzone)
  +                {
  +                    for(cg=0; cg<ind->nrecv[zone]; cg++)
  +                    {
  +                        cg_gl = index_gl[pos_cg];
  +                        fr->cginfo[pos_cg] = ddcginfo(cginfo_mb,cg_gl);
  +                        nrcg = GET_CGINFO_NATOMS(fr->cginfo[pos_cg]);
  +                        cgindex[pos_cg+1] = cgindex[pos_cg] + nrcg;
  +                        if (bBondComm)
  +                        {
  +                            /* Update the charge group presence,
  +                             * so we can use it in the next pass of the loop.
  +                             */
  +                            comm->bLocalCG[cg_gl] = TRUE;
  +                        }
  +                        pos_cg++;
  +                    }
  +                    if (p == 0)
  +                    {
  +                        comm->zone_ncg1[nzone+zone] = ind->nrecv[zone];
  +                    }
  +                    zone++;
  +                    zone_cg_range[nzone+zone] = pos_cg;
  +                }
  +            }
  +            else
  +            {
  +                /* This part of the code is never executed with bBondComm. */
  +                merge_cg_buffers(nzone,cd,p,zone_cg_range,
  +                                 index_gl,recv_i,cg_cm,recv_vr,
  +                                 cgindex,fr->cginfo_mb,fr->cginfo);
  +                pos_cg += ind->nrecv[nzone];
  +            }
  +            nat_tot += ind->nrecv[nzone+1];
  +        }
  +        if (!cd->bInPlace)
  +        {
  +            /* Store the atom block for easy copying of communication buffers */
  +            make_cell2at_index(cd,nzone,zone_cg_range[nzone],cgindex);
  +        }
  +        /* The number of zones doubles with each communicated dimension */
  +        nzone += nzone;
  +    }
  +    /* Store back the possibly reallocated index arrays */
  +    dd->index_gl = index_gl;
  +    dd->cgindex  = cgindex;
  +    
  +    dd->ncg_tot = zone_cg_range[zones->n];
  +    dd->nat_tot = nat_tot;
  +    comm->nat[ddnatHOME] = dd->nat_home;
  +    for(i=ddnatZONE; i<ddnatNR; i++)
  +    {
  +        comm->nat[i] = dd->nat_tot;
  +    }
  +
  +    if (!bBondComm)
  +    {
  +        /* We don't need to update cginfo, since that was already done above.
  +         * So we pass NULL for the forcerec.
  +         */
  +        dd_set_cginfo(dd->index_gl,dd->ncg_home,dd->ncg_tot,
  +                      NULL,comm->bLocalCG);
  +    }
  +
  +    if (debug)
  +    {
  +        fprintf(debug,"Finished setting up DD communication, zones:");
  +        for(c=0; c<zones->n; c++)
  +        {
  +            fprintf(debug," %d",zones->cg_range[c+1]-zones->cg_range[c]);
  +        }
  +        fprintf(debug,"\n");
  +    }
  +}
 +
 +static void set_cg_boundaries(gmx_domdec_zones_t *zones)
 +{
 +    int c;
 +    
 +    for(c=0; c<zones->nizone; c++)
 +    {
 +        zones->izone[c].cg1  = zones->cg_range[c+1];
 +        zones->izone[c].jcg0 = zones->cg_range[zones->izone[c].j0];
 +        zones->izone[c].jcg1 = zones->cg_range[zones->izone[c].j1];
 +    }
 +}
 +
  +/* Determine the spatial extents of zones zone_start..zone_end-1 from
  + * the local cell limits and, with a staggered grid (bGridJump), the
  + * limits of the neighboring domains, and compute the triclinic-corrected
  + * bounding box of each zone. Also sets the zone-0 cg density.
  + */
  +static void set_zones_size(gmx_domdec_t *dd,
  +                           matrix box,const gmx_ddbox_t *ddbox,
  +                           int zone_start,int zone_end)
  +{
  +    gmx_domdec_comm_t *comm;
  +    gmx_domdec_zones_t *zones;
  +    gmx_bool bDistMB;
  +    int  z,zi,zj0,zj1,d,dim;
  +    real rcs,rcmbs;
  +    int  i,j;
  +    real size_j,add_tric;
  +    real vol;
  +
  +    comm = dd->comm;
  +
  +    zones = &comm->zones;
  +
  +    /* Do we need to determine extra distances for multi-body bondeds? */
  +    bDistMB = (comm->bInterCGMultiBody && dd->bGridJump && dd->ndim > 1);
  +
  +    for(z=zone_start; z<zone_end; z++)
  +    {
  +        /* Copy cell limits to zone limits.
  +         * Valid for non-DD dims and non-shifted dims.
  +         */
  +        copy_rvec(comm->cell_x0,zones->size[z].x0);
  +        copy_rvec(comm->cell_x1,zones->size[z].x1);
  +    }
  +
  +    for(d=0; d<dd->ndim; d++)
  +    {
  +        dim = dd->dim[d];
  +
  +        for(z=0; z<zones->n; z++)
  +        {
  +            /* With a staggered grid we have different sizes
  +             * for non-shifted dimensions.
  +             */
  +            if (dd->bGridJump && zones->shift[z][dim] == 0)
  +            {
  +                if (d == 1)
  +                {
  +                    zones->size[z].x0[dim] = comm->zone_d1[zones->shift[z][dd->dim[d-1]]].min0;
  +                    zones->size[z].x1[dim] = comm->zone_d1[zones->shift[z][dd->dim[d-1]]].max1;
  +                }
  +                else if (d == 2)
  +                {
  +                    zones->size[z].x0[dim] = comm->zone_d2[zones->shift[z][dd->dim[d-2]]][zones->shift[z][dd->dim[d-1]]].min0;
  +                    zones->size[z].x1[dim] = comm->zone_d2[zones->shift[z][dd->dim[d-2]]][zones->shift[z][dd->dim[d-1]]].max1;
  +                }
  +            }
  +        }
  +
  +        /* Scale the cut-offs to plane distances for triclinic dims */
  +        rcs   = comm->cutoff;
  +        rcmbs = comm->cutoff_mbody;
  +        if (ddbox->tric_dir[dim])
  +        {
  +            rcs   /= ddbox->skew_fac[dim];
  +            rcmbs /= ddbox->skew_fac[dim];
  +        }
  +
  +        /* Set the lower limit for the shifted zone dimensions */
  +        for(z=zone_start; z<zone_end; z++)
  +        {
  +            if (zones->shift[z][dim] > 0)
  +            {
  +                /* NOTE(review): dim already equals dd->dim[d] here;
  +                 * this reassignment is redundant.
  +                 */
  +                dim = dd->dim[d];
  +                if (!dd->bGridJump || d == 0)
  +                {
  +                    zones->size[z].x0[dim] = comm->cell_x1[dim];
  +                    zones->size[z].x1[dim] = comm->cell_x1[dim] + rcs;
  +                }
  +                else
  +                {
  +                    /* Here we take the lower limit of the zone from
  +                     * the lowest domain of the zone below.
  +                     */
  +                    if (z < 4)
  +                    {
  +                        zones->size[z].x0[dim] =
  +                             comm->zone_d1[zones->shift[z][dd->dim[d-1]]].min1;
  +                    }
  +                    else
  +                    {
  +                        if (d == 1)
  +                        {
  +                            zones->size[z].x0[dim] =
  +                                zones->size[zone_perm[2][z-4]].x0[dim];
  +                        }
  +                        else
  +                        {
  +                            zones->size[z].x0[dim] =
  +                                comm->zone_d2[zones->shift[z][dd->dim[d-2]]][zones->shift[z][dd->dim[d-1]]].min1;
  +                        }
  +                    }
  +                    /* A temporary limit, is updated below */
  +                    zones->size[z].x1[dim] = zones->size[z].x0[dim];
  +
  +                    if (bDistMB)
  +                    {
  +                        for(zi=0; zi<zones->nizone; zi++)
  +                        {
  +                            if (zones->shift[zi][dim] == 0)
  +                            {
  +                                /* This takes the whole zone into account.
  +                                 * With multiple pulses this will lead
  +                                 * to a larger zone than strictly necessary.
  +                                 */
  +                                zones->size[z].x1[dim] = max(zones->size[z].x1[dim],
  +                                                             zones->size[zi].x1[dim]+rcmbs);
  +                            }
  +                        }
  +                    }
  +                }
  +            }
  +        }
  +
  +        /* Loop over the i-zones to set the upper limit of each
  +         * j-zone they see.
  +         */
  +        for(zi=0; zi<zones->nizone; zi++)
  +        {
  +            if (zones->shift[zi][dim] == 0)
  +            {
  +                for(z=zones->izone[zi].j0; z<zones->izone[zi].j1; z++)
  +                {
  +                    if (zones->shift[z][dim] > 0)
  +                    {
  +                        zones->size[z].x1[dim] = max(zones->size[z].x1[dim],
  +                                                     zones->size[zi].x1[dim]+rcs);
  +                    }
  +                }
  +            }
  +        }
  +    }
  +
  +    /* Compute the triclinic-corrected bounding box of each zone */
  +    for(z=zone_start; z<zone_end; z++)
  +    {
  +        /* Initialization only required to keep the compiler happy */
  +        rvec corner_min={0,0,0},corner_max={0,0,0},corner;
  +        int  nc,c;
  +
  +        /* To determine the bounding box for a zone we need to find
  +         * the extreme corners of 4, 2 or 1 corners.
  +         */
  +        nc = 1 << (ddbox->npbcdim - 1);
  +
  +        for(c=0; c<nc; c++)
  +        {
  +            /* Set up a zone corner at x=0, ignoring triclinic couplings */
  +            corner[XX] = 0;
  +            if ((c & 1) == 0)
  +            {
  +                corner[YY] = zones->size[z].x0[YY];
  +            }
  +            else
  +            {
  +                corner[YY] = zones->size[z].x1[YY];
  +            }
  +            if ((c & 2) == 0)
  +            {
  +                corner[ZZ] = zones->size[z].x0[ZZ];
  +            }
  +            else
  +            {
  +                corner[ZZ] = zones->size[z].x1[ZZ];
  +            }
  +            if (dd->ndim == 1 && box[ZZ][YY] != 0)
  +            {
  +                /* With 1D domain decomposition the cg's are not in
  +                 * the triclinic box, but triclinic x-y and rectangular y-z.
  +                 * Shift y back, so it will later end up at 0.
  +                 */
  +                corner[YY] -= corner[ZZ]*box[ZZ][YY]/box[ZZ][ZZ];
  +            }
  +            /* Apply the triclinic couplings */
  +            for(i=YY; i<ddbox->npbcdim; i++)
  +            {
  +                for(j=XX; j<i; j++)
  +                {
  +                    corner[j] += corner[i]*box[i][j]/box[i][i];
  +                }
  +            }
  +            if (c == 0)
  +            {
  +                copy_rvec(corner,corner_min);
  +                copy_rvec(corner,corner_max);
  +            }
  +            else
  +            {
  +                for(i=0; i<DIM; i++)
  +                {
  +                    corner_min[i] = min(corner_min[i],corner[i]);
  +                    corner_max[i] = max(corner_max[i],corner[i]);
  +                }
  +            }
  +        }
  +        /* Copy the extreme corners without offset along x */
  +        for(i=0; i<DIM; i++)
  +        {
  +            zones->size[z].bb_x0[i] = corner_min[i];
  +            zones->size[z].bb_x1[i] = corner_max[i];
  +        }
  +        /* Add the offset along x */
  +        zones->size[z].bb_x0[XX] += zones->size[z].x0[XX];
  +        zones->size[z].bb_x1[XX] += zones->size[z].x1[XX];
  +    }
  +
  +    if (zone_start == 0)
  +    {
  +        /* The charge group density of the home zone */
  +        vol = 1;
  +        for(dim=0; dim<DIM; dim++)
  +        {
  +            vol *= zones->size[0].x1[dim] - zones->size[0].x0[dim];
  +        }
  +        zones->dens_zone0 = (zones->cg_range[1] - zones->cg_range[0])/vol;
  +    }
  +
  +    if (debug)
  +    {
  +        for(z=zone_start; z<zone_end; z++)
  +        {
  +            fprintf(debug,"zone %d    %6.3f - %6.3f  %6.3f - %6.3f  %6.3f - %6.3f\n",
  +                    z,
  +                    zones->size[z].x0[XX],zones->size[z].x1[XX],
  +                    zones->size[z].x0[YY],zones->size[z].x1[YY],
  +                    zones->size[z].x0[ZZ],zones->size[z].x1[ZZ]);
  +            fprintf(debug,"zone %d bb %6.3f - %6.3f  %6.3f - %6.3f  %6.3f - %6.3f\n",
  +                    z,
  +                    zones->size[z].bb_x0[XX],zones->size[z].bb_x1[XX],
  +                    zones->size[z].bb_x0[YY],zones->size[z].bb_x1[YY],
  +                    zones->size[z].bb_x0[ZZ],zones->size[z].bb_x1[ZZ]);
  +        }
  +    }
  +}
 +
 +static int comp_cgsort(const void *a,const void *b)
 +{
 +    int comp;
 +    
 +    gmx_cgsort_t *cga,*cgb;
 +    cga = (gmx_cgsort_t *)a;
 +    cgb = (gmx_cgsort_t *)b;
 +    
 +    comp = cga->nsc - cgb->nsc;
 +    if (comp == 0)
 +    {
 +        comp = cga->ind_gl - cgb->ind_gl;
 +    }
 +    
 +    return comp;
 +}
 +
 +static void order_int_cg(int n,const gmx_cgsort_t *sort,
 +                         int *a,int *buf)
 +{
 +    int i;
 +    
 +    /* Order the data */
 +    for(i=0; i<n; i++)
 +    {
 +        buf[i] = a[sort[i].ind];
 +    }
 +    
 +    /* Copy back to the original array */
 +    for(i=0; i<n; i++)
 +    {
 +        a[i] = buf[i];
 +    }
 +}
 +
 +static void order_vec_cg(int n,const gmx_cgsort_t *sort,
 +                         rvec *v,rvec *buf)
 +{
 +    int i;
 +    
 +    /* Order the data */
 +    for(i=0; i<n; i++)
 +    {
 +        copy_rvec(v[sort[i].ind],buf[i]);
 +    }
 +    
 +    /* Copy back to the original array */
 +    for(i=0; i<n; i++)
 +    {
 +        copy_rvec(buf[i],v[i]);
 +    }
 +}
 +
 +static void order_vec_atom(int ncg,const int *cgindex,const gmx_cgsort_t *sort,
 +                           rvec *v,rvec *buf)
 +{
 +    int a,atot,cg,cg0,cg1,i;
 +    
 +    if (cgindex == NULL)
 +    {
 +        /* Avoid the useless loop of the atoms within a cg */
 +        order_vec_cg(ncg,sort,v,buf);
 +
 +        return;
 +    }
 +
 +    /* Order the data */
 +    a = 0;
 +    for(cg=0; cg<ncg; cg++)
 +    {
 +        cg0 = cgindex[sort[cg].ind];
 +        cg1 = cgindex[sort[cg].ind+1];
 +        for(i=cg0; i<cg1; i++)
 +        {
 +            copy_rvec(v[i],buf[a]);
 +            a++;
 +        }
 +    }
 +    atot = a;
 +    
 +    /* Copy back to the original array */
 +    for(a=0; a<atot; a++)
 +    {
 +        copy_rvec(buf[a],v[a]);
 +    }
 +}
 +
 +static void ordered_sort(int nsort2,gmx_cgsort_t *sort2,
 +                         int nsort_new,gmx_cgsort_t *sort_new,
 +                         gmx_cgsort_t *sort1)
 +{
 +    int i1,i2,i_new;
 +    
 +    /* The new indices are not very ordered, so we qsort them */
 +    qsort_threadsafe(sort_new,nsort_new,sizeof(sort_new[0]),comp_cgsort);
 +    
 +    /* sort2 is already ordered, so now we can merge the two arrays */
 +    i1 = 0;
 +    i2 = 0;
 +    i_new = 0;
 +    while(i2 < nsort2 || i_new < nsort_new)
 +    {
 +        if (i2 == nsort2)
 +        {
 +            sort1[i1++] = sort_new[i_new++];
 +        }
 +        else if (i_new == nsort_new)
 +        {
 +            sort1[i1++] = sort2[i2++];
 +        }
 +        else if (sort2[i2].nsc < sort_new[i_new].nsc ||
 +                 (sort2[i2].nsc == sort_new[i_new].nsc &&
 +                  sort2[i2].ind_gl < sort_new[i_new].ind_gl))
 +        {
 +            sort1[i1++] = sort2[i2++];
 +        }
 +        else
 +        {
 +            sort1[i1++] = sort_new[i_new++];
 +        }
 +    }
 +}
 +
 +static int dd_sort_order(gmx_domdec_t *dd,t_forcerec *fr,int ncg_home_old)
 +{
 +    gmx_domdec_sort_t *sort;
 +    gmx_cgsort_t *cgsort,*sort_i;
 +    int  ncg_new,nsort2,nsort_new,i,*a,moved,*ibuf;
 +    int  sort_last,sort_skip;
 +
 +    sort = dd->comm->sort;
 +
 +    a = fr->ns.grid->cell_index;
 +
 +    moved = NSGRID_SIGNAL_MOVED_FAC*fr->ns.grid->ncells;
 +
 +    if (ncg_home_old >= 0)
 +    {
 +        /* The charge groups that remained in the same ns grid cell
 +         * are completely ordered. So we can sort efficiently by sorting
 +         * the charge groups that did move into the stationary list.
 +         */
 +        ncg_new = 0;
 +        nsort2 = 0;
 +        nsort_new = 0;
 +        for(i=0; i<dd->ncg_home; i++)
 +        {
 +            /* Check if this cg did not move to another node */
 +            if (a[i] < moved)
 +            {
 +                if (i >= ncg_home_old || a[i] != sort->sort[i].nsc)
 +                {
 +                    /* This cg is new on this node or moved ns grid cell */
 +                    if (nsort_new >= sort->sort_new_nalloc)
 +                    {
 +                        sort->sort_new_nalloc = over_alloc_dd(nsort_new+1);
 +                        srenew(sort->sort_new,sort->sort_new_nalloc);
 +                    }
 +                    sort_i = &(sort->sort_new[nsort_new++]);
 +                }
 +                else
 +                {
 +                    /* This cg did not move */
 +                    sort_i = &(sort->sort2[nsort2++]);
 +                }
 +                /* Sort on the ns grid cell indices
 +                 * and the global topology index.
 +                 * index_gl is irrelevant with cell ns,
 +                 * but we set it here anyhow to avoid a conditional.
 +                 */
 +                sort_i->nsc    = a[i];
 +                sort_i->ind_gl = dd->index_gl[i];
 +                sort_i->ind    = i;
 +                ncg_new++;
 +            }
 +        }
 +        if (debug)
 +        {
 +            fprintf(debug,"ordered sort cgs: stationary %d moved %d\n",
 +                    nsort2,nsort_new);
 +        }
 +        /* Sort efficiently */
 +        ordered_sort(nsort2,sort->sort2,nsort_new,sort->sort_new,
 +                     sort->sort);
 +    }
 +    else
 +    {
 +        cgsort = sort->sort;
 +        ncg_new = 0;
 +        for(i=0; i<dd->ncg_home; i++)
 +        {
 +            /* Sort on the ns grid cell indices
 +             * and the global topology index
 +             */
 +            cgsort[i].nsc    = a[i];
 +            cgsort[i].ind_gl = dd->index_gl[i];
 +            cgsort[i].ind    = i;
 +            if (cgsort[i].nsc < moved)
 +            {
 +                ncg_new++;
 +            }
 +        }
 +        if (debug)
 +        {
 +            fprintf(debug,"qsort cgs: %d new home %d\n",dd->ncg_home,ncg_new);
 +        }
 +        /* Determine the order of the charge groups using qsort */
 +        qsort_threadsafe(cgsort,dd->ncg_home,sizeof(cgsort[0]),comp_cgsort);
 +    }
 +
 +    return ncg_new;
 +}
 +
 +static int dd_sort_order_nbnxn(gmx_domdec_t *dd,t_forcerec *fr)
 +{
 +    gmx_cgsort_t *sort;
 +    int  ncg_new,i,*a,na;
 +
 +    sort = dd->comm->sort->sort;
 +
 +    nbnxn_get_atomorder(fr->nbv->nbs,&a,&na);
 +
 +    ncg_new = 0;
 +    for(i=0; i<na; i++)
 +    {
 +        if (a[i] >= 0)
 +        {
 +            sort[ncg_new].ind = a[i];
 +            ncg_new++;
 +        }
 +    }
 +
 +    return ncg_new;
 +}
 +
/* Sort the local state according to the new charge group order.
 *
 * Determines the sort order for the active cutoff scheme
 * (dd_sort_order for group, dd_sort_order_nbnxn for Verlet), then
 * reorders the distributed state vectors (x, v, sd_X, cg_p), cgcm (group
 * scheme only), the global cg index, cginfo and the local cg index, and
 * shrinks dd->ncg_home to the number of charge groups that remain home.
 *
 * ncg_home_old >= 0 enables the efficient incremental sort for the group
 * scheme; pass -1 to force a full resort.
 * NOTE(review): the ePBC parameter is not used in this function.
 */
static void dd_sort_state(gmx_domdec_t *dd,int ePBC,
                          rvec *cgcm,t_forcerec *fr,t_state *state,
                          int ncg_home_old)
{
    gmx_domdec_sort_t *sort;
    gmx_cgsort_t *cgsort,*sort_i;
    int  *cgindex;
    int  ncg_new,i,*ibuf,cgsize;
    rvec *vbuf;

    sort = dd->comm->sort;

    /* Make sure the sort work arrays can hold all home charge groups */
    if (dd->ncg_home > sort->sort_nalloc)
    {
        sort->sort_nalloc = over_alloc_dd(dd->ncg_home);
        srenew(sort->sort,sort->sort_nalloc);
        srenew(sort->sort2,sort->sort_nalloc);
    }
    cgsort = sort->sort;

    /* Fill sort->sort with the new order; ncg_new is the number of
     * charge groups that are still home after redistribution.
     */
    switch (fr->cutoff_scheme)
    {
    case ecutsGROUP:
        ncg_new = dd_sort_order(dd,fr,ncg_home_old);
        break;
    case ecutsVERLET:
        ncg_new = dd_sort_order_nbnxn(dd,fr);
        break;
    default:
        gmx_incons("unimplemented");
        ncg_new = 0;
    }

    /* We alloc with the old size, since cgindex is still old */
    vec_rvec_check_alloc(&dd->comm->vbuf,dd->cgindex[dd->ncg_home]);
    vbuf = dd->comm->vbuf.v;

    /* With charge groups the atom ranges come from cgindex;
     * without them (Verlet) each "cg" is a single atom.
     */
    if (dd->comm->bCGs)
    {
        cgindex = dd->cgindex;
    }
    else
    {
        cgindex = NULL;
    }

    /* Remove the charge groups which are no longer at home here */
    dd->ncg_home = ncg_new;
    if (debug)
    {
        fprintf(debug,"Set the new home charge group count to %d\n",
                dd->ncg_home);
    }

    /* Reorder the state: every distributed vector present in the state
     * flags has to follow the new charge group order.
     */
    for(i=0; i<estNR; i++)
    {
        if (EST_DISTR(i) && (state->flags & (1<<i)))
        {
            switch (i)
            {
            case estX:
                order_vec_atom(dd->ncg_home,cgindex,cgsort,state->x,vbuf);
                break;
            case estV:
                order_vec_atom(dd->ncg_home,cgindex,cgsort,state->v,vbuf);
                break;
            case estSDX:
                order_vec_atom(dd->ncg_home,cgindex,cgsort,state->sd_X,vbuf);
                break;
            case estCGP:
                order_vec_atom(dd->ncg_home,cgindex,cgsort,state->cg_p,vbuf);
                break;
            case estLD_RNG:
            case estLD_RNGI:
            case estDISRE_INITF:
            case estDISRE_RM3TAV:
            case estORIRE_INITF:
            case estORIRE_DTAV:
                /* No ordering required */
                break;
            default:
                gmx_incons("Unknown state entry encountered in dd_sort_state");
                break;
            }
        }
    }
    if (fr->cutoff_scheme == ecutsGROUP)
    {
        /* Reorder cgcm (charge group centers of mass) */
        order_vec_cg(dd->ncg_home,cgsort,cgcm,vbuf);
    }

    if (dd->ncg_home+1 > sort->ibuf_nalloc)
    {
        sort->ibuf_nalloc = over_alloc_dd(dd->ncg_home+1);
        srenew(sort->ibuf,sort->ibuf_nalloc);
    }
    ibuf = sort->ibuf;
    /* Reorder the global cg index */
    order_int_cg(dd->ncg_home,cgsort,dd->index_gl,ibuf);
    /* Reorder the cginfo */
    order_int_cg(dd->ncg_home,cgsort,fr->cginfo,ibuf);
    /* Rebuild the local cg index */
    if (dd->comm->bCGs)
    {
        /* Prefix-sum the (permuted) charge group sizes into ibuf,
         * then copy the result into dd->cgindex.
         */
        ibuf[0] = 0;
        for(i=0; i<dd->ncg_home; i++)
        {
            cgsize = dd->cgindex[cgsort[i].ind+1] - dd->cgindex[cgsort[i].ind];
            ibuf[i+1] = ibuf[i] + cgsize;
        }
        for(i=0; i<dd->ncg_home+1; i++)
        {
            dd->cgindex[i] = ibuf[i];
        }
    }
    else
    {
        /* One atom per cg: the index is the identity mapping */
        for(i=0; i<dd->ncg_home+1; i++)
        {
            dd->cgindex[i] = i;
        }
    }
    /* Set the home atom number */
    dd->nat_home = dd->cgindex[dd->ncg_home];

    if (fr->cutoff_scheme == ecutsVERLET)
    {
        /* The atoms are now exactly in grid order, update the grid order */
        nbnxn_set_atomorder(fr->nbv->nbs);
    }
    else
    {
        /* Copy the sorted ns cell indices back to the ns grid struct */
        for(i=0; i<dd->ncg_home; i++)
        {
            fr->ns.grid->cell_index[i] = cgsort[i].nsc;
        }
        fr->ns.grid->nr = dd->ncg_home;
    }
}
 +
 +static void add_dd_statistics(gmx_domdec_t *dd)
 +{
 +    gmx_domdec_comm_t *comm;
 +    int ddnat;
 +    
 +    comm = dd->comm;
 +    
 +    for(ddnat=ddnatZONE; ddnat<ddnatNR; ddnat++)
 +    {
 +        comm->sum_nat[ddnat-ddnatZONE] +=
 +            comm->nat[ddnat] - comm->nat[ddnat-1];
 +    }
 +    comm->ndecomp++;
 +}
 +
 +void reset_dd_statistics_counters(gmx_domdec_t *dd)
 +{
 +    gmx_domdec_comm_t *comm;
 +    int ddnat;
 +    
 +    comm = dd->comm;
 +
 +    /* Reset all the statistics and counters for total run counting */
 +    for(ddnat=ddnatZONE; ddnat<ddnatNR; ddnat++)
 +    {
 +        comm->sum_nat[ddnat-ddnatZONE] = 0;
 +    }
 +    comm->ndecomp = 0;
 +    comm->nload = 0;
 +    comm->load_step = 0;
 +    comm->load_sum = 0;
 +    comm->load_max = 0;
 +    clear_ivec(comm->load_lim);
 +    comm->load_mdf = 0;
 +    comm->load_pme = 0;
 +}
 +
 +void print_dd_statistics(t_commrec *cr,t_inputrec *ir,FILE *fplog)
 +{
 +    gmx_domdec_comm_t *comm;
 +    int ddnat;
 +    double av;
 +   
 +    comm = cr->dd->comm;
 +    
 +    gmx_sumd(ddnatNR-ddnatZONE,comm->sum_nat,cr);
 +    
 +    if (fplog == NULL)
 +    {
 +        return;
 +    }
 +    
 +    fprintf(fplog,"\n    D O M A I N   D E C O M P O S I T I O N   S T A T I S T I C S\n\n");
 +            
 +    for(ddnat=ddnatZONE; ddnat<ddnatNR; ddnat++)
 +    {
 +        av = comm->sum_nat[ddnat-ddnatZONE]/comm->ndecomp;
 +        switch(ddnat)
 +        {
 +        case ddnatZONE:
 +            fprintf(fplog,
 +                    " av. #atoms communicated per step for force:  %d x %.1f\n",
 +                    2,av);
 +            break;
 +        case ddnatVSITE:
 +            if (cr->dd->vsite_comm)
 +            {
 +                fprintf(fplog,
 +                        " av. #atoms communicated per step for vsites: %d x %.1f\n",
 +                        (EEL_PME(ir->coulombtype) || ir->coulombtype==eelEWALD) ? 3 : 2,
 +                        av);
 +            }
 +            break;
 +        case ddnatCON:
 +            if (cr->dd->constraint_comm)
 +            {
 +                fprintf(fplog,
 +                        " av. #atoms communicated per step for LINCS:  %d x %.1f\n",
 +                        1 + ir->nLincsIter,av);
 +            }
 +            break;
 +        default:
 +            gmx_incons(" Unknown type for DD statistics");
 +        }
 +    }
 +    fprintf(fplog,"\n");
 +    
 +    if (comm->bRecordLoad && EI_DYNAMICS(ir->eI))
 +    {
 +        print_dd_load_av(fplog,cr->dd);
 +    }
 +}
 +
/* Repartition the system over the domain decomposition grid.
 *
 * This is the main DD driver, called at neighbor search steps. It:
 *  - decides whether to do dynamic load balancing (DLB) this step and
 *    possibly turns DLB on based on the measured force imbalance,
 *  - (re)distributes the charge groups: full distribution from the
 *    master state (bMasterState), index rebuild after reading a
 *    checkpoint, or local redistribution of moved cgs,
 *  - optionally sorts the local state on grid position (bSortCG),
 *  - sets up the DD communication, rebuilds all local indices, the
 *    local topology, constraints/vsites and mdatoms,
 *  - records statistics and the partitioning step.
 *
 * fplog may be NULL; state_global is only referenced when bMasterState.
 */
void dd_partition_system(FILE            *fplog,
                         gmx_large_int_t      step,
                         t_commrec       *cr,
                         gmx_bool            bMasterState,
                         int             nstglobalcomm,
                         t_state         *state_global,
                         gmx_mtop_t      *top_global,
                         t_inputrec      *ir,
                         t_state         *state_local,
                         rvec            **f,
                         t_mdatoms       *mdatoms,
                         gmx_localtop_t  *top_local,
                         t_forcerec      *fr,
                         gmx_vsite_t     *vsite,
                         gmx_shellfc_t   shellfc,
                         gmx_constr_t    constr,
                         t_nrnb          *nrnb,
                         gmx_wallcycle_t wcycle,
                         gmx_bool            bVerbose)
{
    gmx_domdec_t *dd;
    gmx_domdec_comm_t *comm;
    gmx_ddbox_t ddbox={0};
    t_block *cgs_gl;
    gmx_large_int_t step_pcoupl;
    rvec cell_ns_x0,cell_ns_x1;
    int  i,j,n,cg0=0,ncg_home_old=-1,ncg_moved,nat_f_novirsum;
    gmx_bool bBoxChanged,bNStGlobalComm,bDoDLB,bCheckDLB,bTurnOnDLB,bLogLoad;
    gmx_bool bRedist,bSortCG,bResortAll;
    ivec ncells_old={0,0,0},ncells_new={0,0,0},np;
    real grid_density;
    char sbuf[22];

    dd = cr->dd;
    comm = dd->comm;

    bBoxChanged = (bMasterState || DEFORM(*ir));
    if (ir->epc != epcNO)
    {
        /* With nstpcouple > 1 pressure coupling happens
         * one step after calculating the pressure.
         * Box scaling happens at the end of the MD step,
         * after the DD partitioning.
         * We therefore have to do DLB in the first partitioning
         * after an MD step where P-coupling occurred.
         * We need to determine the last step in which p-coupling occurred.
         * MRS -- need to validate this for vv?
         */
        n = ir->nstpcouple;
        if (n == 1)
        {
            step_pcoupl = step - 1;
        }
        else
        {
            step_pcoupl = ((step - 1)/n)*n + 1;
        }
        if (step_pcoupl >= comm->partition_step)
        {
            bBoxChanged = TRUE;
        }
    }

    bNStGlobalComm = (step % nstglobalcomm == 0);

    if (!comm->bDynLoadBal)
    {
        bDoDLB = FALSE;
    }
    else
    {
        /* Should we do dynamic load balacing this step?
         * Since it requires (possibly expensive) global communication,
         * we might want to do DLB less frequently.
         */
        if (bBoxChanged || ir->epc != epcNO)
        {
            bDoDLB = bBoxChanged;
        }
        else
        {
            bDoDLB = bNStGlobalComm;
        }
    }

    /* Check if we have recorded loads on the nodes */
    if (comm->bRecordLoad && dd_load_count(comm))
    {
        if (comm->eDLB == edlbAUTO && !comm->bDynLoadBal)
        {
            /* Check if we should use DLB at the second partitioning
             * and every 100 partitionings,
             * so the extra communication cost is negligible.
             */
            n = max(100,nstglobalcomm);
            bCheckDLB = (comm->n_load_collect == 0 ||
                         comm->n_load_have % n == n-1);
        }
        else
        {
            bCheckDLB = FALSE;
        }

        /* Print load every nstlog, first and last step to the log file */
        bLogLoad = ((ir->nstlog > 0 && step % ir->nstlog == 0) ||
                    comm->n_load_collect == 0 ||
                    (ir->nsteps >= 0 &&
                     (step + ir->nstlist > ir->init_step + ir->nsteps)));

        /* Avoid extra communication due to verbose screen output
         * when nstglobalcomm is set.
         */
        if (bDoDLB || bLogLoad || bCheckDLB ||
            (bVerbose && (ir->nstlist == 0 || nstglobalcomm <= ir->nstlist)))
        {
            get_load_distribution(dd,wcycle);
            if (DDMASTER(dd))
            {
                if (bLogLoad)
                {
                    dd_print_load(fplog,dd,step-1);
                }
                if (bVerbose)
                {
                    dd_print_load_verbose(dd);
                }
            }
            comm->n_load_collect++;

            if (bCheckDLB) {
                /* Since the timings are node dependent, the master decides */
                if (DDMASTER(dd))
                {
                    bTurnOnDLB =
                        (dd_force_imb_perf_loss(dd) >= DD_PERF_LOSS);
                    if (debug)
                    {
                        fprintf(debug,"step %s, imb loss %f\n",
                                gmx_step_str(step,sbuf),
                                dd_force_imb_perf_loss(dd));
                    }
                }
                /* Broadcast the master's decision to all DD nodes */
                dd_bcast(dd,sizeof(bTurnOnDLB),&bTurnOnDLB);
                if (bTurnOnDLB)
                {
                    turn_on_dlb(fplog,cr,step);
                    bDoDLB = TRUE;
                }
            }
        }
        comm->n_load_have++;
    }

    cgs_gl = &comm->cgs_gl;

    bRedist = FALSE;
    if (bMasterState)
    {
        /* Full (re)distribution of the global state over the nodes */

        /* Clear the old state */
        clear_dd_indices(dd,0,0);

        set_ddbox(dd,bMasterState,cr,ir,state_global->box,
                  TRUE,cgs_gl,state_global->x,&ddbox);

        get_cg_distribution(fplog,step,dd,cgs_gl,
                            state_global->box,&ddbox,state_global->x);

        dd_distribute_state(dd,cgs_gl,
                            state_global,state_local,f);

        dd_make_local_cgs(dd,&top_local->cgs);

        /* Ensure that we have space for the new distribution */
        dd_check_alloc_ncg(fr,state_local,f,dd->ncg_home);

        if (fr->cutoff_scheme == ecutsGROUP)
        {
            calc_cgcm(fplog,0,dd->ncg_home,
                      &top_local->cgs,state_local->x,fr->cg_cm);
        }

        inc_nrnb(nrnb,eNR_CGCM,dd->nat_home);

        dd_set_cginfo(dd->index_gl,0,dd->ncg_home,fr,comm->bLocalCG);

        cg0 = 0;
    }
    else if (state_local->ddp_count != dd->ddp_count)
    {
        /* The local state is from another partitioning (e.g. read from
         * a checkpoint): rebuild the local indices from the stored
         * global cg indices before redistributing.
         */
        if (state_local->ddp_count > dd->ddp_count)
        {
            gmx_fatal(FARGS,"Internal inconsistency state_local->ddp_count (%d) > dd->ddp_count (%d)",state_local->ddp_count,dd->ddp_count);
        }

        if (state_local->ddp_count_cg_gl != state_local->ddp_count)
        {
            gmx_fatal(FARGS,"Internal inconsistency state_local->ddp_count_cg_gl (%d) != state_local->ddp_count (%d)",state_local->ddp_count_cg_gl,state_local->ddp_count);
        }

        /* Clear the old state */
        clear_dd_indices(dd,0,0);

        /* Build the new indices */
        rebuild_cgindex(dd,cgs_gl->index,state_local);
        make_dd_indices(dd,cgs_gl->index,0);

        if (fr->cutoff_scheme == ecutsGROUP)
        {
            /* Redetermine the cg COMs */
            calc_cgcm(fplog,0,dd->ncg_home,
                      &top_local->cgs,state_local->x,fr->cg_cm);
        }

        inc_nrnb(nrnb,eNR_CGCM,dd->nat_home);

        dd_set_cginfo(dd->index_gl,0,dd->ncg_home,fr,comm->bLocalCG);

        set_ddbox(dd,bMasterState,cr,ir,state_local->box,
                  TRUE,&top_local->cgs,state_local->x,&ddbox);

        bRedist = comm->bDynLoadBal;
    }
    else
    {
        /* We have the full state, only redistribute the cgs */

        /* Clear the non-home indices */
        clear_dd_indices(dd,dd->ncg_home,dd->nat_home);

        /* Avoid global communication for dim's without pbc and -gcom */
        if (!bNStGlobalComm)
        {
            copy_rvec(comm->box0    ,ddbox.box0    );
            copy_rvec(comm->box_size,ddbox.box_size);
        }
        set_ddbox(dd,bMasterState,cr,ir,state_local->box,
                  bNStGlobalComm,&top_local->cgs,state_local->x,&ddbox);

        bBoxChanged = TRUE;
        bRedist = TRUE;
    }
    /* For dim's without pbc and -gcom */
    copy_rvec(ddbox.box0    ,comm->box0    );
    copy_rvec(ddbox.box_size,comm->box_size);

    set_dd_cell_sizes(dd,&ddbox,dynamic_dd_box(&ddbox,ir),bMasterState,bDoDLB,
                      step,wcycle);

    if (comm->nstDDDumpGrid > 0 && step % comm->nstDDDumpGrid == 0)
    {
        write_dd_grid_pdb("dd_grid",step,dd,state_local->box,&ddbox);
    }

    /* Check if we should sort the charge groups */
    if (comm->nstSortCG > 0)
    {
        bSortCG = (bMasterState ||
                   (bRedist && (step % comm->nstSortCG == 0)));
    }
    else
    {
        bSortCG = FALSE;
    }

    ncg_home_old = dd->ncg_home;

    ncg_moved = 0;
    if (bRedist)
    {
        wallcycle_sub_start(wcycle,ewcsDD_REDIST);

        /* Move cgs that left this domain to their new home node */
        dd_redistribute_cg(fplog,step,dd,ddbox.tric_dir,
                           state_local,f,fr,mdatoms,
                           !bSortCG,nrnb,&cg0,&ncg_moved);

        wallcycle_sub_stop(wcycle,ewcsDD_REDIST);
    }

    get_nsgrid_boundaries(ddbox.nboundeddim,state_local->box,
                          dd,&ddbox,
                          &comm->cell_x0,&comm->cell_x1,
                          dd->ncg_home,fr->cg_cm,
                          cell_ns_x0,cell_ns_x1,&grid_density);

    if (bBoxChanged)
    {
        comm_dd_ns_cell_sizes(dd,&ddbox,cell_ns_x0,cell_ns_x1,step);
    }

    /* Set up the neighbor search grid for the active cutoff scheme;
     * remember the old cell counts to decide on a full resort below.
     */
    switch (fr->cutoff_scheme)
    {
    case ecutsGROUP:
        copy_ivec(fr->ns.grid->n,ncells_old);
        grid_first(fplog,fr->ns.grid,dd,&ddbox,fr->ePBC,
                   state_local->box,cell_ns_x0,cell_ns_x1,
                   fr->rlistlong,grid_density);
        break;
    case ecutsVERLET:
        nbnxn_get_ncells(fr->nbv->nbs,&ncells_old[XX],&ncells_old[YY]);
        break;
    default:
        gmx_incons("unimplemented");
    }
    /* We need to store tric_dir for dd_get_ns_ranges called from ns.c */
    copy_ivec(ddbox.tric_dir,comm->tric_dir);

    if (bSortCG)
    {
        wallcycle_sub_start(wcycle,ewcsDD_GRID);

        /* Sort the state on charge group position.
         * This enables exact restarts from this step.
         * It also improves performance by about 15% with larger numbers
         * of atoms per node.
         */

        /* Fill the ns grid with the home cell,
         * so we can sort with the indices.
         */
        set_zones_ncg_home(dd);

        switch (fr->cutoff_scheme)
        {
        case ecutsVERLET:
            set_zones_size(dd,state_local->box,&ddbox,0,1);

            nbnxn_put_on_grid(fr->nbv->nbs,fr->ePBC,state_local->box,
                              0,
                              comm->zones.size[0].bb_x0,
                              comm->zones.size[0].bb_x1,
                              0,dd->ncg_home,
                              comm->zones.dens_zone0,
                              fr->cginfo,
                              state_local->x,
                              ncg_moved,bRedist ? comm->moved : NULL,
                              fr->nbv->grp[eintLocal].kernel_type,
                              fr->nbv->grp[eintLocal].nbat);

            nbnxn_get_ncells(fr->nbv->nbs,&ncells_new[XX],&ncells_new[YY]);
            break;
        case ecutsGROUP:
            fill_grid(fplog,&comm->zones,fr->ns.grid,dd->ncg_home,
                      0,dd->ncg_home,fr->cg_cm);

            copy_ivec(fr->ns.grid->n,ncells_new);
            break;
        default:
            gmx_incons("unimplemented");
        }

        bResortAll = bMasterState;

        /* Check if we can user the old order and ns grid cell indices
         * of the charge groups to sort the charge groups efficiently.
         */
        if (ncells_new[XX] != ncells_old[XX] ||
            ncells_new[YY] != ncells_old[YY] ||
            ncells_new[ZZ] != ncells_old[ZZ])
        {
            bResortAll = TRUE;
        }

        if (debug)
        {
            fprintf(debug,"Step %s, sorting the %d home charge groups\n",
                    gmx_step_str(step,sbuf),dd->ncg_home);
        }
        dd_sort_state(dd,ir->ePBC,fr->cg_cm,fr,state_local,
                      bResortAll ? -1 : ncg_home_old);
        /* Rebuild all the indices */
        cg0 = 0;
        ga2la_clear(dd->ga2la);

        wallcycle_sub_stop(wcycle,ewcsDD_GRID);
    }

    wallcycle_sub_start(wcycle,ewcsDD_SETUPCOMM);

    /* Setup up the communication and communicate the coordinates */
    setup_dd_communication(dd,state_local->box,&ddbox,fr,state_local,f);

    /* Set the indices */
    make_dd_indices(dd,cgs_gl->index,cg0);

    /* Set the charge group boundaries for neighbor searching */
    set_cg_boundaries(&comm->zones);

    if (fr->cutoff_scheme == ecutsVERLET)
    {
        set_zones_size(dd,state_local->box,&ddbox,
                       bSortCG ? 1 : 0,comm->zones.n);
    }

    wallcycle_sub_stop(wcycle,ewcsDD_SETUPCOMM);

    /*
    write_dd_pdb("dd_home",step,"dump",top_global,cr,
                 -1,state_local->x,state_local->box);
    */

    wallcycle_sub_start(wcycle,ewcsDD_MAKETOP);

    /* Extract a local topology from the global topology */
    for(i=0; i<dd->ndim; i++)
    {
        np[dd->dim[i]] = comm->cd[i].np;
    }
    dd_make_local_top(fplog,dd,&comm->zones,dd->npbcdim,state_local->box,
                      comm->cellsize_min,np,
                      fr,
                      fr->cutoff_scheme==ecutsGROUP ? fr->cg_cm : state_local->x,
                      vsite,top_global,top_local);

    wallcycle_sub_stop(wcycle,ewcsDD_MAKETOP);

    wallcycle_sub_start(wcycle,ewcsDD_MAKECONSTR);

    /* Set up the special atom communication */
    n = comm->nat[ddnatZONE];
    for(i=ddnatZONE+1; i<ddnatNR; i++)
    {
        switch(i)
        {
        case ddnatVSITE:
            if (vsite && vsite->n_intercg_vsite)
            {
                n = dd_make_local_vsites(dd,n,top_local->idef.il);
            }
            break;
        case ddnatCON:
            if (dd->bInterCGcons || dd->bInterCGsettles)
            {
                /* Only for inter-cg constraints we need special code */
                n = dd_make_local_constraints(dd,n,top_global,fr->cginfo,
                                              constr,ir->nProjOrder,
                                              top_local->idef.il);
            }
            break;
        default:
            gmx_incons("Unknown special atom type setup");
        }
        comm->nat[i] = n;
    }

    wallcycle_sub_stop(wcycle,ewcsDD_MAKECONSTR);

    wallcycle_sub_start(wcycle,ewcsDD_TOPOTHER);

    /* Make space for the extra coordinates for virtual site
     * or constraint communication.
     */
    state_local->natoms = comm->nat[ddnatNR-1];
    if (state_local->natoms > state_local->nalloc)
    {
        dd_realloc_state(state_local,f,state_local->natoms);
    }

    if (fr->bF_NoVirSum)
    {
        if (vsite && vsite->n_intercg_vsite)
        {
            nat_f_novirsum = comm->nat[ddnatVSITE];
        }
        else
        {
            if (EEL_FULL(ir->coulombtype) && dd->n_intercg_excl > 0)
            {
                nat_f_novirsum = dd->nat_tot;
            }
            else
            {
                nat_f_novirsum = dd->nat_home;
            }
        }
    }
    else
    {
        nat_f_novirsum = 0;
    }

    /* Set the number of atoms required for the force calculation.
     * Forces need to be constrained when using a twin-range setup
     * or with energy minimization. For simple simulations we could
     * avoid some allocation, zeroing and copying, but this is
     * probably not worth the complications ande checking.
     */
    forcerec_set_ranges(fr,dd->ncg_home,dd->ncg_tot,
                        dd->nat_tot,comm->nat[ddnatCON],nat_f_novirsum);

    /* We make the all mdatoms up to nat_tot_con.
     * We could save some work by only setting invmass
     * between nat_tot and nat_tot_con.
     */
    /* This call also sets the new number of home particles to dd->nat_home */
    atoms2md(top_global,ir,
             comm->nat[ddnatCON],dd->gatindex,0,dd->nat_home,mdatoms);

    /* Now we have the charges we can sort the FE interactions */
    dd_sort_local_top(dd,mdatoms,top_local);

    if (vsite != NULL)
    {
        /* Now we have updated mdatoms, we can do the last vsite bookkeeping */
        split_vsites_over_threads(top_local->idef.il,mdatoms,FALSE,vsite);
    }

    if (shellfc)
    {
        /* Make the local shell stuff, currently no communication is done */
        make_local_shells(cr,mdatoms,shellfc);
    }

      if (ir->implicit_solvent)
    {
        make_local_gb(cr,fr->born,ir->gb_algorithm);
    }

    init_bonded_thread_force_reduction(fr,&top_local->idef);

    if (!(cr->duty & DUTY_PME))
    {
        /* Send the charges to our PME only node */
        gmx_pme_send_q(cr,mdatoms->nChargePerturbed,
                       mdatoms->chargeA,mdatoms->chargeB,
                       dd_pme_maxshift_x(dd),dd_pme_maxshift_y(dd));
    }

    if (constr)
    {
        set_constraints(constr,top_local,ir,mdatoms,cr);
    }

    if (ir->ePull != epullNO)
    {
        /* Update the local pull groups */
        dd_make_local_pull_groups(dd,ir->pull,mdatoms);
    }

    if (ir->bRot)
    {
        /* Update the local rotation groups */
        dd_make_local_rotation_groups(dd,ir->rot);
    }


    add_dd_statistics(dd);

    /* Make sure we only count the cycles for this DD partitioning */
    clear_dd_cycle_counts(dd);

    /* Because the order of the atoms might have changed since
     * the last vsite construction, we need to communicate the constructing
     * atom coordinates again (for spreading the forces this MD step).
     */
    dd_move_x_vsites(dd,state_local->box,state_local->x);

    wallcycle_sub_stop(wcycle,ewcsDD_TOPOTHER);

    if (comm->nstDDDump > 0 && step % comm->nstDDDump == 0)
    {
        dd_move_x(dd,state_local->box,state_local->x);
        write_dd_pdb("dd_dump",step,"dump",top_global,cr,
                     -1,state_local->x,state_local->box);
    }

    /* Store the partitioning step */
    comm->partition_step = step;

    /* Increase the DD partitioning counter */
    dd->ddp_count++;
    /* The state currently matches this DD partitioning count, store it */
    state_local->ddp_count = dd->ddp_count;
    if (bMasterState)
    {
        /* The DD master node knows the complete cg distribution,
         * store the count so we can possibly skip the cg info communication.
         */
        comm->master_cg_ddp_count = (bSortCG ? 0 : dd->ddp_count);
    }

    if (comm->DD_debug > 0)
    {
        /* Set the env var GMX_DD_DEBUG if you suspect corrupted indices */
        check_index_consistency(dd,top_global->natoms,ncg_mtop(top_global),
                                "after partitioning");
    }
}
index ae2c31c1ec590315783bdf074562db5365edc9e2,0000000000000000000000000000000000000000..f3c063264ec19fcf13e2780dda6024bff20222f5
mode 100644,000000..100644
--- /dev/null
@@@ -1,2614 -1,0 +1,3153 @@@
-     int flood_id;
 +/* -*- mode: c; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4; c-file-style: "stroustrup"; -*-
 + *
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + *          GROningen MAchine for Chemical Simulations
 + *
 + *                        VERSION 3.2.0
 + * Written by David van der Spoel, Erik Lindahl, Berk Hess, and others.
 + * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 + * Copyright (c) 2001-2004, The GROMACS development team,
 + * check out http://www.gromacs.org for more information.
 + * This program is free software; you can redistribute it and/or
 + * modify it under the terms of the GNU General Public License
 + * as published by the Free Software Foundation; either version 2
 + * of the License, or (at your option) any later version.
 + *
 + * If you want to redistribute modifications, please consider that
 + * scientific software is very special. Version control is crucial -
 + * bugs must be traceable. We will be happy to consider code for
 + * inclusion in the official distribution, but derived work must not
 + * be called official GROMACS. Details are found in the README & COPYING
 + * files - if they are missing, get the official version at www.gromacs.org.
 + *
 + * To help us fund GROMACS development, we humbly ask that you cite
 + * the papers on the package - you can find them in the top README file.
 + *
 + * For more info, check our website at http://www.gromacs.org
 + *
 + * And Hey:
 + * GROwing Monsters And Cloning Shrimps
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <stdio.h>
 +#include <time.h>
 +#include "typedefs.h"
 +#include "string2.h"
 +#include "smalloc.h"
 +#include "names.h"
 +#include "confio.h"
 +#include "mvdata.h"
 +#include "txtdump.h"
 +#include "vec.h"
 +#include <time.h>
 +#include "nrnb.h"
 +#include "mshift.h"
 +#include "mdrun.h"
 +#include "update.h"
 +#include "physics.h"
 +#include "nrjac.h"
 +#include "mtop_util.h"
 +#include "edsam.h"
 +#include "gmxfio.h"
++#include "xvgr.h"
 +#include "groupcoord.h"
 +
 +
 +/* We use the same defines as in mvdata.c here */
 +#define  block_bc(cr,   d) gmx_bcast(     sizeof(d),     &(d),(cr))
 +#define nblock_bc(cr,nr,d) gmx_bcast((nr)*sizeof((d)[0]), (d),(cr))
 +#define   snew_bc(cr,d,nr) { if (!MASTER(cr)) snew((d),(nr)); }
 +
++/* These macros determine the column width in the output file */
++#define EDcol_sfmt "%17s"
++#define EDcol_efmt "%17.5e"
++#define EDcol_ffmt "%17f"
 +
 +/* enum to identify the type of ED: none, normal ED, flooding */
 +enum {eEDnone, eEDedsam, eEDflood, eEDnr};
 +
 +/* enum to identify operations on reference, average, origin, target structures */
 +enum {eedREF, eedAV, eedORI, eedTAR, eedNR};
 +
 +
 +typedef struct
 +{
 +    int    neig;     /* nr of eigenvectors             */
 +    int   *ieig;     /* index nrs of eigenvectors      */
 +    real  *stpsz;    /* stepsizes (per eigenvector)    */
 +    rvec  **vec;     /* eigenvector components         */
 +    real  *xproj;    /* instantaneous x projections    */
 +    real  *fproj;    /* instantaneous f projections    */
 +    real  radius;    /* instantaneous radius           */
 +    real  *refproj;  /* starting or target projecions  */
 +    /* When using flooding as harmonic restraint: The current reference projection
 +     * is at each step calculated from the initial refproj0 and the slope. */
 +    real  *refproj0,*refprojslope;
 +} t_eigvec;
 +
 +
 +typedef struct
 +{
 +    t_eigvec      mon;            /* only monitored, no constraints       */
 +    t_eigvec      linfix;         /* fixed linear constraints             */
 +    t_eigvec      linacc;         /* acceptance linear constraints        */
 +    t_eigvec      radfix;         /* fixed radial constraints (exp)       */
 +    t_eigvec      radacc;         /* acceptance radial constraints (exp)  */
 +    t_eigvec      radcon;         /* acceptance rad. contraction constr.  */
 +} t_edvecs;
 +
 +
 +typedef struct
 +{
 +    real deltaF0;
 +    gmx_bool bHarmonic;           /* Use flooding for harmonic restraint on
 +                                     the eigenvector                          */
 +    gmx_bool bConstForce;         /* Do not calculate a flooding potential,
 +                                     instead flood with a constant force      */
 +    real tau;
 +    real deltaF;
 +    real Efl;
 +    real kT;
 +    real Vfl;
 +    real dt;
 +    real constEfl;
 +    real alpha2;
-     rvec          *x_old;         /* used to keep track of the shift vectors
-                                      such that the ED molecule can always be
-                                      made whole in the parallel case          */
 +    rvec *forces_cartesian;
 +    t_eigvec vecs;         /* use flooding for these */
 +} t_edflood;
 +
 +
 +/* This type is for the average, reference, target, and origin structure    */
 +typedef struct gmx_edx
 +{
 +    int           nr;             /* number of atoms this structure contains  */
 +    int           nr_loc;         /* number of atoms on local node            */
 +    int           *anrs;          /* atom index numbers                       */
 +    int           *anrs_loc;      /* local atom index numbers                 */
 +    int           nalloc_loc;     /* allocation size of anrs_loc              */
 +    int           *c_ind;         /* at which position of the whole anrs
 +                                   * array is a local atom?, i.e.
 +                                   * c_ind[0...nr_loc-1] gives the atom index
 +                                   * with respect to the collective
 +                                   * anrs[0...nr-1] array                     */
 +    rvec          *x;             /* positions for this structure             */
-     struct edpar   *next_edi;      /* Pointer to another ed dataset        */
++    rvec          *x_old;         /* Last positions which have the correct PBC
++                                     representation of the ED group. In
++                                     combination with keeping track of the
++                                     shift vectors, the ED group can always
++                                     be made whole                            */
 +    real          *m;             /* masses                                   */
 +    real          mtot;           /* total mass (only used in sref)           */
 +    real          *sqrtm;         /* sqrt of the masses used for mass-
 +                                   * weighting of analysis (only used in sav) */
 +} t_gmx_edx;
 +
 +
 +typedef struct edpar
 +{
 +    int            nini;           /* total Nr of atoms                    */
 +    gmx_bool       fitmas;         /* true if trans fit with cm            */
 +    gmx_bool       pcamas;         /* true if mass-weighted PCA            */
 +    int            presteps;       /* number of steps to run without any
 +                                    *    perturbations ... just monitoring */
 +    int            outfrq;         /* freq (in steps) of writing to edo    */
 +    int            maxedsteps;     /* max nr of steps per cycle            */
 +
 +    /* all gmx_edx datasets are copied to all nodes in the parallel case   */
 +    struct gmx_edx sref;           /* reference positions, to these fitting
 +                                    * will be done                         */
 +    gmx_bool       bRefEqAv;       /* If true, reference & average indices
 +                                    * are the same. Used for optimization  */
 +    struct gmx_edx sav;            /* average positions                    */
 +    struct gmx_edx star;           /* target positions                     */
 +    struct gmx_edx sori;           /* origin positions                     */
 +
 +    t_edvecs       vecs;           /* eigenvectors                         */
 +    real           slope;          /* minimal slope in acceptance radexp   */
 +
 +    gmx_bool       bNeedDoEdsam;   /* if any of the options mon, linfix, ...
 +                                    * is used (i.e. apart from flooding)   */
 +    t_edflood      flood;          /* parameters especially for flooding   */
 +    struct t_ed_buffer *buf;       /* handle to local buffers              */
-     const char    *edinam;        /* name of ED sampling input file       */
-     const char    *edonam;        /*                     output           */
++    struct edpar   *next_edi;      /* Pointer to another ED group          */
 +} t_edpar;
 +
 +
 +typedef struct gmx_edsam
 +{
 +    int           eEDtype;        /* Type of ED: see enums above          */
-     gmx_bool      bStartFromCpt;
 +    FILE          *edo;           /* output file pointer                  */
 +    t_edpar       *edpar;
 +    gmx_bool      bFirst;
-                                   ED shifts for this ED dataset need to
 +} t_gmx_edsam;
 +
 +
 +struct t_do_edsam
 +{
 +    matrix old_rotmat;
 +    real oldrad;
 +    rvec old_transvec,older_transvec,transvec_compact;
 +    rvec *xcoll;         /* Positions from all nodes, this is the
 +                            collective set we work on.
 +                            These are the positions of atoms with
 +                            average structure indices */
 +    rvec *xc_ref;        /* same but with reference structure indices */
 +    ivec *shifts_xcoll;        /* Shifts for xcoll  */
 +    ivec *extra_shifts_xcoll;  /* xcoll shift changes since last NS step */
 +    ivec *shifts_xc_ref;       /* Shifts for xc_ref */
 +    ivec *extra_shifts_xc_ref; /* xc_ref shift changes since last NS step */
 +    gmx_bool bUpdateShifts;    /* TRUE in NS steps to indicate that the
++                                  ED shifts for this ED group need to
 +                                  be updated */
 +};
 +
 +
 +/* definition of ED buffer structure */
 +struct t_ed_buffer
 +{
 +    struct t_fit_to_ref *           fit_to_ref;
 +    struct t_do_edfit *             do_edfit;
 +    struct t_do_edsam *             do_edsam;
 +    struct t_do_radcon *            do_radcon;
 +};
 +
 +
 +/* Function declarations */
 +static void fit_to_reference(rvec *xcoll,rvec transvec,matrix rotmat,t_edpar *edi);
- static void rad_project(t_edpar *edi, rvec *x, t_eigvec *vec, t_commrec *cr)
 +static void translate_and_rotate(rvec *x,int nat,rvec transvec,matrix rotmat);
++static real rmsd_from_structure(rvec *x, struct gmx_edx *s);
++static int read_edi_file(const char *fn, t_edpar *edi, int nr_mdatoms);
++static void crosscheck_edi_file_vs_checkpoint(gmx_edsam_t ed, edsamstate_t *EDstate);
++static void init_edsamstate(gmx_edsam_t ed, edsamstate_t *EDstate);
++static void write_edo_legend(gmx_edsam_t ed, int nED, const output_env_t oenv);
 +/* End function declarations */
 +
 +
++/* Multiple ED groups will be labeled with letters instead of numbers 
++ * to avoid confusion with eigenvector indices */
++static char get_EDgroupChar(int nr_edi, int nED)
++{
++    if (nED == 1)
++    {
++        return ' ';
++    }
++
++    /* nr_edi = 1 -> A
++     * nr_edi = 2 -> B ...
++     */
++    return 'A' + nr_edi - 1;
++}
++
++
 +/* Does not subtract average positions, projection on single eigenvector is returned
 + * used by: do_linfix, do_linacc, do_radfix, do_radacc, do_radcon
 + * Average position is subtracted in ed_apply_constraints prior to calling projectx
 + */
 +static real projectx(t_edpar *edi, rvec *xcoll, rvec *vec)
 +{
 +    int  i;
 +    real proj=0.0;
 +
 +
 +    for (i=0; i<edi->sav.nr; i++)
++    {
 +        proj += edi->sav.sqrtm[i]*iprod(vec[i], xcoll[i]);
++    }
 +
 +    return proj;
 +}
 +
 +
 +/* Specialized: projection is stored in vec->refproj
 + * -> used for radacc, radfix, radcon  and center of flooding potential
 + * subtracts average positions, projects vector x */
-             vk[1][r]*vh[1][c]+
-             vk[2][r]*vh[2][c];
++static void rad_project(t_edpar *edi, rvec *x, t_eigvec *vec)
 +{
 +    int i;
 +    real rad=0.0;
 +
 +    /* Subtract average positions */
 +    for (i = 0; i < edi->sav.nr; i++)
++    {
 +        rvec_dec(x[i], edi->sav.x[i]);
++    }
 +
 +    for (i = 0; i < vec->neig; i++)
 +    {
 +        vec->refproj[i] = projectx(edi,x,vec->vec[i]);
 +        rad += pow((vec->refproj[i]-vec->xproj[i]),2);
 +    }
 +    vec->radius=sqrt(rad);
 +
 +    /* Add average positions */
 +    for (i = 0; i < edi->sav.nr; i++)
++    {
 +        rvec_inc(x[i], edi->sav.x[i]);
++    }
 +}
 +
 +
 +/* Project vector x, subtract average positions prior to projection and add
 + * them afterwards to retain the unchanged vector. Store in xproj. Mass-weighting
 + * is applied. */
 +static void project_to_eigvectors(rvec       *x,    /* The positions to project to an eigenvector */
 +                                  t_eigvec   *vec,  /* The eigenvectors */
 +                                  t_edpar    *edi)
 +{
 +    int  i;
 +
 +
 +    if (!vec->neig) return;
 +
 +    /* Subtract average positions */
 +    for (i=0; i<edi->sav.nr; i++)
++    {
 +        rvec_dec(x[i], edi->sav.x[i]);
++    }
 +
 +    for (i=0; i<vec->neig; i++)
++    {
 +        vec->xproj[i] = projectx(edi, x, vec->vec[i]);
++    }
 +
 +    /* Add average positions */
 +    for (i=0; i<edi->sav.nr; i++)
++    {
 +        rvec_inc(x[i], edi->sav.x[i]);
++    }
 +}
 +
 +
 +/* Project vector x onto all edi->vecs (mon, linfix,...) */
 +static void project(rvec      *x,     /* positions to project */
 +                    t_edpar   *edi)   /* edi data set */
 +{
 +    /* It is not more work to subtract the average position in every
 +     * subroutine again, because these routines are rarely used simultanely */
 +    project_to_eigvectors(x, &edi->vecs.mon   , edi);
 +    project_to_eigvectors(x, &edi->vecs.linfix, edi);
 +    project_to_eigvectors(x, &edi->vecs.linacc, edi);
 +    project_to_eigvectors(x, &edi->vecs.radfix, edi);
 +    project_to_eigvectors(x, &edi->vecs.radacc, edi);
 +    project_to_eigvectors(x, &edi->vecs.radcon, edi);
 +}
 +
 +
 +static real calc_radius(t_eigvec *vec)
 +{
 +    int i;
 +    real rad=0.0;
 +
 +
 +    for (i=0; i<vec->neig; i++)
++    {
 +        rad += pow((vec->refproj[i]-vec->xproj[i]),2);
++    }
 +
 +    return rad=sqrt(rad);
 +}
 +
 +
 +/* Debug helper */
 +#ifdef DEBUGHELPERS
 +static void dump_xcoll(t_edpar *edi, struct t_do_edsam *buf, t_commrec *cr,
 +                       int step)
 +{
 +    int i;
 +    FILE *fp;
 +    char fn[STRLEN];
 +    rvec *xcoll;
 +    ivec *shifts, *eshifts;
 +
 +
 +    if (!MASTER(cr))
 +        return;
 +
 +    xcoll   = buf->xcoll;
 +    shifts  = buf->shifts_xcoll;
 +    eshifts = buf->extra_shifts_xcoll;
 +
 +    sprintf(fn, "xcolldump_step%d.txt", step);
 +    fp = fopen(fn, "w");
 +
 +    for (i=0; i<edi->sav.nr; i++)
++    {
 +        fprintf(fp, "%d %9.5f %9.5f %9.5f   %d %d %d   %d %d %d\n",
 +                edi->sav.anrs[i]+1,
 +                xcoll[i][XX]  , xcoll[i][YY]  , xcoll[i][ZZ],
 +                shifts[i][XX] , shifts[i][YY] , shifts[i][ZZ],
 +                eshifts[i][XX], eshifts[i][YY], eshifts[i][ZZ]);
++    }
 +
 +    fclose(fp);
 +}
 +
 +
 +/* Debug helper */
 +static void dump_edi_positions(FILE *out, struct gmx_edx *s, const char name[])
 +{
 +    int i;
 +
 +
 +    fprintf(out, "#%s positions:\n%d\n", name, s->nr);
 +    if (s->nr == 0)
++    {
 +        return;
++    }
 +
 +    fprintf(out, "#index, x, y, z");
 +    if (s->sqrtm)
++    {
 +        fprintf(out, ", sqrt(m)");
++    }
 +    for (i=0; i<s->nr; i++)
 +    {
 +        fprintf(out, "\n%6d  %11.6f %11.6f %11.6f",s->anrs[i], s->x[i][XX], s->x[i][YY], s->x[i][ZZ]);
 +        if (s->sqrtm)
++        {
 +            fprintf(out,"%9.3f",s->sqrtm[i]);
++        }
 +    }
 +    fprintf(out, "\n");
 +}
 +
 +
 +/* Debug helper */
 +static void dump_edi_eigenvecs(FILE *out, t_eigvec *ev,
 +                               const char name[], int length)
 +{
 +    int i,j;
 +
 +
 +    fprintf(out, "#%s eigenvectors:\n%d\n", name, ev->neig);
 +    /* Dump the data for every eigenvector: */
 +    for (i=0; i<ev->neig; i++)
 +    {
 +        fprintf(out, "EV %4d\ncomponents %d\nstepsize %f\nxproj %f\nfproj %f\nrefproj %f\nradius %f\nComponents:\n",
 +                ev->ieig[i], length, ev->stpsz[i], ev->xproj[i], ev->fproj[i], ev->refproj[i], ev->radius);
 +        for (j=0; j<length; j++)
++        {
 +            fprintf(out, "%11.6f %11.6f %11.6f\n", ev->vec[i][j][XX], ev->vec[i][j][YY], ev->vec[i][j][ZZ]);
++        }
 +    }
 +}
 +
 +
 +/* Debug helper */
 +static void dump_edi(t_edpar *edpars, t_commrec *cr, int nr_edi)
 +{
 +    FILE  *out;
 +    char  fn[STRLEN];
 +
 +
 +    sprintf(fn, "EDdump_node%d_edi%d", cr->nodeid, nr_edi);
 +    out = ffopen(fn, "w");
 +
 +    fprintf(out,"#NINI\n %d\n#FITMAS\n %d\n#ANALYSIS_MAS\n %d\n",
 +            edpars->nini,edpars->fitmas,edpars->pcamas);
 +    fprintf(out,"#OUTFRQ\n %d\n#MAXLEN\n %d\n#SLOPECRIT\n %f\n",
 +            edpars->outfrq,edpars->maxedsteps,edpars->slope);
 +    fprintf(out,"#PRESTEPS\n %d\n#DELTA_F0\n %f\n#TAU\n %f\n#EFL_NULL\n %f\n#ALPHA2\n %f\n",
 +            edpars->presteps,edpars->flood.deltaF0,edpars->flood.tau,
 +            edpars->flood.constEfl,edpars->flood.alpha2);
 +
 +    /* Dump reference, average, target, origin positions */
 +    dump_edi_positions(out, &edpars->sref, "REFERENCE");
 +    dump_edi_positions(out, &edpars->sav , "AVERAGE"  );
 +    dump_edi_positions(out, &edpars->star, "TARGET"   );
 +    dump_edi_positions(out, &edpars->sori, "ORIGIN"   );
 +
 +    /* Dump eigenvectors */
 +    dump_edi_eigenvecs(out, &edpars->vecs.mon   , "MONITORED", edpars->sav.nr);
 +    dump_edi_eigenvecs(out, &edpars->vecs.linfix, "LINFIX"   , edpars->sav.nr);
 +    dump_edi_eigenvecs(out, &edpars->vecs.linacc, "LINACC"   , edpars->sav.nr);
 +    dump_edi_eigenvecs(out, &edpars->vecs.radfix, "RADFIX"   , edpars->sav.nr);
 +    dump_edi_eigenvecs(out, &edpars->vecs.radacc, "RADACC"   , edpars->sav.nr);
 +    dump_edi_eigenvecs(out, &edpars->vecs.radcon, "RADCON"   , edpars->sav.nr);
 +
 +    /* Dump flooding eigenvectors */
 +    dump_edi_eigenvecs(out, &edpars->flood.vecs, "FLOODING"  , edpars->sav.nr);
 +
 +    /* Dump ed local buffer */
 +    fprintf(out, "buf->do_edfit         =%p\n", (void*)edpars->buf->do_edfit  );
 +    fprintf(out, "buf->do_edsam         =%p\n", (void*)edpars->buf->do_edsam  );
 +    fprintf(out, "buf->do_radcon        =%p\n", (void*)edpars->buf->do_radcon );
 +
 +    ffclose(out);
 +}
 +
 +
 +/* Debug helper */
 +static void dump_rotmat(FILE* out,matrix rotmat)
 +{
 +    fprintf(out,"ROTMAT: %12.8f %12.8f %12.8f\n",rotmat[XX][XX],rotmat[XX][YY],rotmat[XX][ZZ]);
 +    fprintf(out,"ROTMAT: %12.8f %12.8f %12.8f\n",rotmat[YY][XX],rotmat[YY][YY],rotmat[YY][ZZ]);
 +    fprintf(out,"ROTMAT: %12.8f %12.8f %12.8f\n",rotmat[ZZ][XX],rotmat[ZZ][YY],rotmat[ZZ][ZZ]);
 +}
 +
 +
 +/* Debug helper */
 +static void dump_rvec(FILE *out, int dim, rvec *x)
 +{
 +    int i;
 +
 +
 +    for (i=0; i<dim; i++)
++    {
 +        fprintf(out,"%4d   %f %f %f\n",i,x[i][XX],x[i][YY],x[i][ZZ]);
++    }
 +}
 +
 +
 +/* Debug helper */
 +static void dump_mat(FILE* out, int dim, double** mat)
 +{
 +    int i,j;
 +
 +
 +    fprintf(out,"MATRIX:\n");
 +    for (i=0;i<dim;i++)
 +    {
 +        for (j=0;j<dim;j++)
++        {
 +            fprintf(out,"%f ",mat[i][j]);
++        }
 +        fprintf(out,"\n");
 +    }
 +}
 +#endif
 +
 +
 +struct t_do_edfit {
 +    double **omega;
 +    double **om;
 +};
 +
 +static void do_edfit(int natoms,rvec *xp,rvec *x,matrix R,t_edpar *edi)
 +{
 +    /* this is a copy of do_fit with some modifications */
 +    int    c,r,n,j,i,irot;
 +    double d[6],xnr,xpc;
 +    matrix vh,vk,u;
 +    int    index;
 +    real   max_d;
 +
 +    struct t_do_edfit *loc;
 +    gmx_bool bFirst;
 +
 +    if(edi->buf->do_edfit != NULL)
++    {
 +        bFirst = FALSE;
++    }
 +    else
 +    {
 +        bFirst = TRUE;
 +        snew(edi->buf->do_edfit,1);
 +    }
 +    loc = edi->buf->do_edfit;
 +
 +    if (bFirst)
 +    {
 +        snew(loc->omega,2*DIM);
 +        snew(loc->om,2*DIM);
 +        for(i=0; i<2*DIM; i++)
 +        {
 +            snew(loc->omega[i],2*DIM);
 +            snew(loc->om[i],2*DIM);
 +        }
 +    }
 +
 +    for(i=0;(i<6);i++)
 +    {
 +        d[i]=0;
 +        for(j=0;(j<6);j++)
 +        {
 +            loc->omega[i][j]=0;
 +            loc->om[i][j]=0;
 +        }
 +    }
 +
 +    /* calculate the matrix U */
 +    clear_mat(u);
 +    for(n=0;(n<natoms);n++)
 +    {
 +        for(c=0; (c<DIM); c++)
 +        {
 +            xpc=xp[n][c];
 +            for(r=0; (r<DIM); r++)
 +            {
 +                xnr=x[n][r];
 +                u[c][r]+=xnr*xpc;
 +            }
 +        }
 +    }
 +
 +    /* construct loc->omega */
 +    /* loc->omega is symmetric -> loc->omega==loc->omega' */
 +    for(r=0;(r<6);r++)
++    {
 +        for(c=0;(c<=r);c++)
++        {
 +            if ((r>=3) && (c<3))
 +            {
 +                loc->omega[r][c]=u[r-3][c];
 +                loc->omega[c][r]=u[r-3][c];
 +            }
 +            else
 +            {
 +                loc->omega[r][c]=0;
 +                loc->omega[c][r]=0;
 +            }
++        }
++    }
 +
 +    /* determine h and k */
 +#ifdef DEBUG
 +    {
 +        int i;
 +        dump_mat(stderr,2*DIM,loc->omega);
 +        for (i=0; i<6; i++)
++        {
 +            fprintf(stderr,"d[%d] = %f\n",i,d[i]);
++        }
 +    }
 +#endif
 +    jacobi(loc->omega,6,d,loc->om,&irot);
 +
 +    if (irot==0)
++    {
 +        fprintf(stderr,"IROT=0\n");
++    }
 +
 +    index=0; /* For the compiler only */
 +
 +    for(j=0;(j<3);j++)
 +    {
 +        max_d=-1000;
 +        for(i=0;(i<6);i++)
++        {
 +            if (d[i]>max_d)
 +            {
 +                max_d=d[i];
 +                index=i;
 +            }
++        }
 +        d[index]=-10000;
 +        for(i=0;(i<3);i++)
 +        {
 +            vh[j][i]=M_SQRT2*loc->om[i][index];
 +            vk[j][i]=M_SQRT2*loc->om[i+DIM][index];
 +        }
 +    }
 +
 +    /* determine R */
 +    for(c=0;(c<3);c++)
++    {
 +        for(r=0;(r<3);r++)
++        {
 +            R[c][r]=vk[0][r]*vh[0][c]+
-                 vk[1][r]*vh[1][c]-
-                 vk[2][r]*vh[2][c];
++                    vk[1][r]*vh[1][c]+
++                    vk[2][r]*vh[2][c];
++        }
++    }
 +    if (det(R) < 0)
++    {
 +        for(c=0;(c<3);c++)
++        {
 +            for(r=0;(r<3);r++)
++            {
 +                R[c][r]=vk[0][r]*vh[0][c]+
- static void write_edo_flood(t_edpar *edi, FILE *fp, gmx_large_int_t step)
++                        vk[1][r]*vh[1][c]-
++                        vk[2][r]*vh[2][c];
++            }
++        }
++    }
 +}
 +
 +
 +static void rmfit(int nat, rvec *xcoll, rvec transvec, matrix rotmat)
 +{
 +    rvec vec;
 +    matrix tmat;
 +
 +
 +    /* Remove rotation.
 +     * The inverse rotation is described by the transposed rotation matrix */
 +    transpose(rotmat,tmat);
 +    rotate_x(xcoll, nat, tmat);
 +
 +    /* Remove translation */
 +    vec[XX]=-transvec[XX];
 +    vec[YY]=-transvec[YY];
 +    vec[ZZ]=-transvec[ZZ];
 +    translate_x(xcoll, nat, vec);
 +}
 +
 +
 +/**********************************************************************************
 + ******************** FLOODING ****************************************************
 + **********************************************************************************
 +
 +The flooding ability was added later to edsam. Many of the edsam functionality could be reused for that purpose.
 +The flooding covariance matrix, i.e. the selected eigenvectors and their corresponding eigenvalues are
 +read as 7th Component Group. The eigenvalues are coded into the stepsize parameter (as used by -linfix or -linacc).
 +
 +do_md clls right in the beginning the function init_edsam, which reads the edi file, saves all the necessary information in
 +the edi structure and calls init_flood, to initialise some extra fields in the edi->flood structure.
 +
 +since the flooding acts on forces do_flood is called from the function force() (force.c), while the other
 +edsam functionality is hooked into md via the update() (update.c) function acting as constraint on positions.
 +
 +do_flood makes a copy of the positions,
 +fits them, projects them computes flooding_energy, and flooding forces. The forces are computed in the
 +space of the eigenvectors and are then blown up to the full cartesian space and rotated back to remove the
 +fit. Then do_flood adds these forces to the forcefield-forces
 +(given as parameter) and updates the adaptive flooding parameters Efl and deltaF.
 +
 +To center the flooding potential at a different location one can use the -ori option in make_edi. The ori
 +structure is projected to the system of eigenvectors and then this position in the subspace is used as
 +center of the flooding potential.   If the option is not used, the center will be zero in the subspace,
 +i.e. the average structure as given in the make_edi file.
 +
 +To use the flooding potential as restraint, make_edi has the option -restrain, which leads to inverted
 +signs of alpha2 and Efl, such that the sign in the exponential of Vfl is not inverted but the sign of
 +Vfl is inverted. Vfl = Efl * exp (- .../Efl/alpha2*x^2...) With tau>0 the negative Efl will grow slowly
 +so that the restraint is switched off slowly. When Efl==0 and inverted flooding is ON is reached no
 + further adaption is applied, Efl will stay constant at zero.
 +
 +To use restraints with harmonic potentials switch -restrain and -harmonic. Then the eigenvalues are
 +used as spring constants for the harmonic potential.
 +Note that eq3 in the flooding paper (J. Comp. Chem. 2006, 27, 1693-1702) defines the parameter lambda \
 +as the inverse of the spring constant, whereas the implementation uses lambda as the spring constant.
 +
 +To use more than one flooding matrix just concatenate several .edi files (cat flood1.edi flood2.edi > flood_all.edi)
 +the routine read_edi_file reads all of theses flooding files.
 +The structure t_edi is now organized as a list of t_edis and the function do_flood cycles through the list
 +calling the do_single_flood() routine for every single entry. Since every state variables have been kept in one
 +edi there is no interdependence whatsoever. The forces are added together.
 +
 +  To write energies into the .edr file, call the function
 +        get_flood_enx_names(char**, int *nnames) to get the Header (Vfl1 Vfl2... Vfln)
 +and call
 +        get_flood_energies(real Vfl[],int nnames);
 +
 +  TODO:
 +- one could program the whole thing such that Efl, Vfl and deltaF is written to the .edr file. -- i dont know how to do that, yet.
 +
 +  Maybe one should give a range of atoms for which to remove motion, so that motion is removed with
 +  two edsam files from two peptide chains
 +*/
 +
-     char buf[22];
-     gmx_bool bOutputRef=FALSE;
++static void write_edo_flood(t_edpar *edi, FILE *fp, real rmsd)
 +{
 +    int i;
-     fprintf(fp,"%d.th FL: %s %12.5e %12.5e %12.5e\n",
-             edi->flood.flood_id, gmx_step_str(step,buf),
-             edi->flood.Efl, edi->flood.Vfl, edi->flood.deltaF);
 +
 +
-     /* Check whether any of the references changes with time (this can happen
-      * in case flooding is used as harmonic restraint). If so, output all the
-      * current reference projections. */
-     if (edi->flood.bHarmonic)
++    /* Output how well we fit to the reference structure */
++    fprintf(fp, EDcol_ffmt, rmsd);
 +
-         for (i = 0; i < edi->flood.vecs.neig; i++)
++    for (i=0; i<edi->flood.vecs.neig; i++)
 +    {
-             if (edi->flood.vecs.refprojslope[i] != 0.0)
-                 bOutputRef=TRUE;
++        fprintf(fp, EDcol_efmt, edi->flood.vecs.xproj[i]);
++
++        /* Check whether the reference projection changes with time (this can happen
++         * in case flooding is used as harmonic restraint). If so, output the
++         * current reference projection */
++        if (edi->flood.bHarmonic && edi->flood.vecs.refprojslope[i] != 0.0)
 +        {
-         if (bOutputRef)
++            fprintf(fp, EDcol_efmt, edi->flood.vecs.refproj[i]);
 +        }
-             fprintf(fp, "Ref. projs.: ");
-             for (i = 0; i < edi->flood.vecs.neig; i++)
-             {
-                 fprintf(fp, "%12.5e ", edi->flood.vecs.refproj[i]);
-             }
-             fprintf(fp, "\n");
++
++        /* Output Efl if we are doing adaptive flooding */
++        if (0 != edi->flood.tau)
 +        {
-     }
-     fprintf(fp,"FL_FORCES: ");
-     for (i=0; i<edi->flood.vecs.neig; i++)
-         fprintf(fp," %12.5e",edi->flood.vecs.fproj[i]);
++            fprintf(fp, EDcol_efmt, edi->flood.Efl);
 +        }
-     fprintf(fp,"\n");
++        fprintf(fp, EDcol_efmt, edi->flood.Vfl);
 +
-         write_edo_flood(edi,edo,step);
++        /* Output deltaF if we are doing adaptive flooding */
++        if (0 != edi->flood.tau)
++        {
++            fprintf(fp, EDcol_efmt, edi->flood.deltaF);
++        }
++        fprintf(fp, EDcol_efmt, edi->flood.vecs.fproj[i]);
++    }
 +}
 +
 +
 +/* From flood.xproj compute the Vfl(x) at this point */
 +static real flood_energy(t_edpar *edi, gmx_large_int_t step)
 +{
 +    /* compute flooding energy Vfl
 +     Vfl = Efl * exp( - \frac {kT} {2Efl alpha^2} * sum_i { \lambda_i c_i^2 } )
 +     \lambda_i is the reciprocal eigenvalue 1/\sigma_i
 +         it is already computed by make_edi and stored in stpsz[i]
 +     bHarmonic:
 +       Vfl = - Efl * 1/2(sum _i {\frac 1{\lambda_i} c_i^2})
 +     */
 +    real sum;
 +    real Vfl;
 +    int i;
 +
 +
 +    /* Each time this routine is called (i.e. each time step), we add a small
 +     * value to the reference projection. This way a harmonic restraint towards
 +     * a moving reference is realized. If no value for the additive constant
 +     * is provided in the edi file, the reference will not change. */
 +    if (edi->flood.bHarmonic)
 +    {
 +        for (i=0; i<edi->flood.vecs.neig; i++)
 +        {
 +            edi->flood.vecs.refproj[i] = edi->flood.vecs.refproj0[i] + step * edi->flood.vecs.refprojslope[i];
 +        }
 +    }
 +
 +    sum=0.0;
 +    /* Compute sum which will be the exponent of the exponential */
 +    for (i=0; i<edi->flood.vecs.neig; i++)
 +    {
 +        /* stpsz stores the reciprocal eigenvalue 1/sigma_i */
 +        sum += edi->flood.vecs.stpsz[i]*(edi->flood.vecs.xproj[i]-edi->flood.vecs.refproj[i])*(edi->flood.vecs.xproj[i]-edi->flood.vecs.refproj[i]);
 +    }
 +
 +    /* Compute the Gauss function*/
 +    if (edi->flood.bHarmonic)
 +    {
 +        Vfl = -0.5*edi->flood.Efl*sum;  /* minus sign because Efl is negative, if restrain is on. */
 +    }
 +    else
 +    {
 +        Vfl = edi->flood.Efl!=0 ? edi->flood.Efl*exp(-edi->flood.kT/2/edi->flood.Efl/edi->flood.alpha2*sum) :0;
 +    }
 +
 +    return Vfl;
 +}
 +
 +
 +/* From the position and from Vfl compute forces in subspace -> store in edi->vec.flood.fproj */
 +static void flood_forces(t_edpar *edi)
 +{
 +    /* compute the forces in the subspace of the flooding eigenvectors
 +     * by the formula F_i= V_{fl}(c) * ( \frac {kT} {E_{fl}} \lambda_i c_i */
 +
 +    int i;
 +    real energy=edi->flood.Vfl;
 +
 +
 +    if (edi->flood.bHarmonic)
++    {
 +        for (i=0; i<edi->flood.vecs.neig; i++)
 +        {
 +            edi->flood.vecs.fproj[i] = edi->flood.Efl* edi->flood.vecs.stpsz[i]*(edi->flood.vecs.xproj[i]-edi->flood.vecs.refproj[i]);
 +        }
++    }
 +    else
++    {
 +        for (i=0; i<edi->flood.vecs.neig; i++)
 +        {
 +            /* if Efl is zero the forces are zero if not use the formula */
 +            edi->flood.vecs.fproj[i] = edi->flood.Efl!=0 ? edi->flood.kT/edi->flood.Efl/edi->flood.alpha2*energy*edi->flood.vecs.stpsz[i]*(edi->flood.vecs.xproj[i]-edi->flood.vecs.refproj[i]) : 0;
 +        }
++    }
 +}
 +
 +
 +/* Raise forces from subspace into cartesian space */
 +static void flood_blowup(t_edpar *edi, rvec *forces_cart)
 +{
 +    /* this function lifts the forces from the subspace to the cartesian space
 +     all the values not contained in the subspace are assumed to be zero and then
 +     a coordinate transformation from eigenvector to cartesian vectors is performed
 +     The nonexistent values don't have to be set to zero explicitly, they would occur
 +     as zero-valued summands, hence we simply skip computing this part of the sum.
 +
 +     for every atom we add all the contributions to this atom from all the different eigenvectors.
 +
 +     NOTE: one could add directly to the forcefield forces, would mean we wouldn't have to clear the
 +     field forces_cart prior to the computation, but we compute the forces separately
 +     to have them accessible for diagnostics
 +     */
 +    int  j,eig;
 +    rvec dum;
 +    real *forces_sub;
 +
 +
 +    forces_sub = edi->flood.vecs.fproj;
 +
 +
 +    /* Calculate the cartesian forces for the local atoms */
 +
 +    /* Clear forces first */
 +    for (j=0; j<edi->sav.nr_loc; j++)
++    {
 +        clear_rvec(forces_cart[j]);
++    }
 +
 +    /* Now compute atomwise */
 +    for (j=0; j<edi->sav.nr_loc; j++)
 +    {
 +        /* Compute forces_cart[edi->sav.anrs[j]] */
 +        for (eig=0; eig<edi->flood.vecs.neig; eig++)
 +        {
 +            /* Force vector is force * eigenvector (compute only atom j) */
 +            svmul(forces_sub[eig],edi->flood.vecs.vec[eig][edi->sav.c_ind[j]],dum);
 +            /* Add this vector to the cartesian forces */
 +            rvec_inc(forces_cart[j],dum);
 +        }
 +    }
 +}
 +
 +
 +/* Update the values of Efl, deltaF depending on tau and Vfl */
 +static void update_adaption(t_edpar *edi)
 +{
 +    /* this function updates the parameter Efl and deltaF according to the rules given in
 +     * 'predicting unimolecular chemical reactions: chemical flooding' M Mueller et al,
 +     * J. chem Phys. */
 +
 +    if ((edi->flood.tau < 0 ? -edi->flood.tau : edi->flood.tau ) > 0.00000001)
 +    {
 +        edi->flood.Efl = edi->flood.Efl+edi->flood.dt/edi->flood.tau*(edi->flood.deltaF0-edi->flood.deltaF);
 +        /* check if we are restraining (inverted flooding) -> don't let Efl become positive */
 +        if (edi->flood.alpha2<0 && edi->flood.Efl>-0.00000001)
++        {
 +            edi->flood.Efl = 0;
++        }
 +
 +        edi->flood.deltaF = (1-edi->flood.dt/edi->flood.tau)*edi->flood.deltaF+edi->flood.dt/edi->flood.tau*edi->flood.Vfl;
 +    }
 +}
 +
 +
 +static void do_single_flood(
 +        FILE *edo,
 +        rvec x[],
 +        rvec force[],
 +        t_edpar *edi,
 +        gmx_large_int_t step,
 +        matrix box,
 +        t_commrec *cr,
 +        gmx_bool bNS)       /* Are we in a neighbor searching step? */
 +{
 +    int i;
 +    matrix  rotmat;         /* rotation matrix */
 +    matrix  tmat;           /* inverse rotation */
 +    rvec    transvec;       /* translation vector */
++    real    rmsdev;
 +    struct t_do_edsam *buf;
 +
 +
 +    buf=edi->buf->do_edsam;
 +
 +
 +    /* Broadcast the positions of the AVERAGE structure such that they are known on
 +     * every processor. Each node contributes its local positions x and stores them in
 +     * the collective ED array buf->xcoll */
 +    communicate_group_positions(cr, buf->xcoll, buf->shifts_xcoll, buf->extra_shifts_xcoll, bNS, x,
 +                    edi->sav.nr, edi->sav.nr_loc, edi->sav.anrs_loc, edi->sav.c_ind, edi->sav.x_old, box);
 +
 +    /* Only assemble REFERENCE positions if their indices differ from the average ones */
 +    if (!edi->bRefEqAv)
++    {
 +        communicate_group_positions(cr, buf->xc_ref, buf->shifts_xc_ref, buf->extra_shifts_xc_ref, bNS, x,
 +                edi->sref.nr, edi->sref.nr_loc, edi->sref.anrs_loc, edi->sref.c_ind, edi->sref.x_old, box);
++    }
 +
 +    /* If bUpdateShifts was TRUE, the shifts have just been updated in get_positions.
 +     * We do not need to update the shifts until the next NS step */
 +    buf->bUpdateShifts = FALSE;
 +
 +    /* Now all nodes have all of the ED/flooding positions in edi->sav->xcoll,
 +     * as well as the indices in edi->sav.anrs */
 +
 +    /* Fit the reference indices to the reference structure */
 +    if (edi->bRefEqAv)
++    {
 +        fit_to_reference(buf->xcoll , transvec, rotmat, edi);
++    }
 +    else
++    {
 +        fit_to_reference(buf->xc_ref, transvec, rotmat, edi);
++    }
 +
 +    /* Now apply the translation and rotation to the ED structure */
 +    translate_and_rotate(buf->xcoll, edi->sav.nr, transvec, rotmat);
 +
 +    /* Project fitted structure onto subspace -> store in edi->flood.vecs.xproj */
 +    project_to_eigvectors(buf->xcoll,&edi->flood.vecs,edi);
 +
 +    if (FALSE == edi->flood.bConstForce)
 +    {
 +        /* Compute Vfl(x) from flood.xproj */
 +        edi->flood.Vfl = flood_energy(edi, step);
 +
 +        update_adaption(edi);
 +
 +        /* Compute the flooding forces */
 +        flood_forces(edi);
 +    }
 +
 +    /* Translate them into cartesian positions */
 +    flood_blowup(edi, edi->flood.forces_cartesian);
 +
 +    /* Rotate forces back so that they correspond to the given structure and not to the fitted one */
 +    /* Each node rotates back its local forces */
 +    transpose(rotmat,tmat);
 +    rotate_x(edi->flood.forces_cartesian, edi->sav.nr_loc, tmat);
 +
 +    /* Finally add forces to the main force variable */
 +    for (i=0; i<edi->sav.nr_loc; i++)
++    {
 +        rvec_inc(force[edi->sav.anrs_loc[i]],edi->flood.forces_cartesian[i]);
++    }
 +
 +    /* Output is written by the master process */
 +    if (do_per_step(step,edi->outfrq) && MASTER(cr))
-         FILE            *log,    /* md.log file */
++    {
++        /* Output how well we fit to the reference */
++        if (edi->bRefEqAv)
++        {
++            /* Indices of reference and average structures are identical,
++             * thus we can calculate the rmsd to SREF using xcoll */
++            rmsdev = rmsd_from_structure(buf->xcoll,&edi->sref);
++        }
++        else
++        {
++            /* We have to translate & rotate the reference atoms first */
++            translate_and_rotate(buf->xc_ref, edi->sref.nr, transvec, rotmat);
++            rmsdev = rmsd_from_structure(buf->xc_ref,&edi->sref);
++        }
++
++        write_edo_flood(edi,edo,rmsdev);
++    }
 +}
 +
 +
 +/* Main flooding routine, called from do_force */
 +extern void do_flood(
-         gmx_edsam_t     ed,      /* ed data structure contains all ED and flooding datasets */
 +        t_commrec       *cr,     /* Communication record */
++        t_inputrec      *ir,     /* Input record */
 +        rvec            x[],     /* Positions on the local processor */
 +        rvec            force[], /* forcefield forces, to these the flooding forces are added */
-     edi = ed->edpar;
++        gmx_edsam_t     ed,      /* ed data structure contains all ED and flooding groups */
 +        matrix          box,     /* the box */
 +        gmx_large_int_t step,    /* The relative time step since ir->init_step is already subtracted */
 +        gmx_bool        bNS)     /* Are we in a neighbor searching step? */
 +{
 +    t_edpar *edi;
 +
 +
++    edi = ed->edpar;
++
++    /* Write time to edo, when required. Output the time anyhow since we need
++     * it in the output file for ED constraints. */
++    if (MASTER(cr) && do_per_step(step,edi->outfrq))
++    {
++        fprintf(ed->edo, "\n%12f", ir->init_t + step*ir->delta_t);
++    }
++
 +    if (ed->eEDtype != eEDflood)
++    {
 +        return;
++    }
 +
- static void init_flood(t_edpar *edi, gmx_edsam_t ed, real dt, t_commrec *cr)
 +    while (edi)
 +    {
 +        /* Call flooding for one matrix */
 +        if (edi->flood.vecs.neig)
++        {
 +            do_single_flood(ed->edo,x,force,edi,step,box,cr,bNS);
++        }
 +        edi = edi->next_edi;
 +    }
 +}
 +
 +
 +/* Called by init_edi, configure some flooding related variables and structures,
 + * print headers to output files */
-         /* If in any of the datasets we find a flooding vector, flooding is turned on */
++static void init_flood(t_edpar *edi, gmx_edsam_t ed, real dt)
 +{
 +    int i;
 +
 +
 +    edi->flood.Efl = edi->flood.constEfl;
 +    edi->flood.Vfl = 0;
 +    edi->flood.dt  = dt;
 +
 +    if (edi->flood.vecs.neig)
 +    {
-         fprintf(stderr,"ED: Flooding of matrix %d is switched on.\n", edi->flood.flood_id);
++        /* If in any of the ED groups we find a flooding vector, flooding is turned on */
 +        ed->eEDtype = eEDflood;
 +
-         fprintf(ed->edo,"FL_HEADER: Flooding of matrix %d is switched on! The flooding output will have the following format:\n",
-                 edi->flood.flood_id);
-         fprintf(ed->edo,"FL_HEADER: Step     Efl          Vfl       deltaF\n");
++        fprintf(stderr,"ED: Flooding %d eigenvector%s.\n", edi->flood.vecs.neig, edi->flood.vecs.neig > 1 ? "s":"");
 +
 +        if (edi->flood.bConstForce)
 +        {
 +            /* We have used stpsz as a vehicle to carry the fproj values for constant
 +             * force flooding. Now we copy that to flood.vecs.fproj. Note that
 +             * in const force flooding, fproj is never changed. */
 +            for (i=0; i<edi->flood.vecs.neig; i++)
 +            {
 +                edi->flood.vecs.fproj[i] = edi->flood.vecs.stpsz[i];
 +
 +                fprintf(stderr, "ED: applying on eigenvector %d a constant force of %g\n",
 +                        edi->flood.vecs.ieig[i], edi->flood.vecs.fproj[i]);
 +            }
 +        }
- gmx_edsam_t ed_open(int nfile,const t_filenm fnm[],unsigned long Flags,t_commrec *cr)
 +    }
 +}
 +
 +
 +#ifdef DEBUGHELPERS
 +/*********** Energy book keeping ******/
 +static void get_flood_enx_names(t_edpar *edi, char** names, int *nnames)  /* get header of energies */
 +{
 +    t_edpar *actual;
 +    int count;
 +    char buf[STRLEN];
 +    actual=edi;
 +    count = 1;
 +    while (actual)
 +    {
 +        srenew(names,count);
 +        sprintf(buf,"Vfl_%d",count);
 +        names[count-1]=strdup(buf);
 +        actual=actual->next_edi;
 +        count++;
 +    }
 +    *nnames=count-1;
 +}
 +
 +
 +static void get_flood_energies(t_edpar *edi, real Vfl[],int nnames)
 +{
 +    /*fl has to be big enough to capture nnames-many entries*/
 +    t_edpar *actual;
 +    int count;
 +
 +
 +    actual=edi;
 +    count = 1;
 +    while (actual)
 +    {
 +        Vfl[count-1]=actual->flood.Vfl;
 +        actual=actual->next_edi;
 +        count++;
 +    }
 +    if (nnames!=count-1)
++    {
 +        gmx_fatal(FARGS,"Number of energies is not consistent with t_edi structure");
++    }
 +}
 +/************* END of FLOODING IMPLEMENTATION ****************************/
 +#endif
 +
 +
-         /* Open .edi input file: */
-         ed->edinam=ftp2fn(efEDI,nfile,fnm);
-         /* The master opens the .edo output file */
++gmx_edsam_t ed_open(int natoms, edsamstate_t *EDstate, int nfile,const t_filenm fnm[],unsigned long Flags, const output_env_t oenv, t_commrec *cr)
 +{
 +    gmx_edsam_t ed;
++    int         nED;
 +
 +
 +    /* Allocate space for the ED data structure */
 +    snew(ed, 1);
 +
 +    /* We want to perform ED (this switch might later be upgraded to eEDflood) */
 +    ed->eEDtype = eEDedsam;
 +
 +    if (MASTER(cr))
 +    {
-         ed->edonam = ftp2fn(efEDO,nfile,fnm);
-         ed->edo    = gmx_fio_fopen(ed->edonam,(Flags & MD_APPENDFILES)? "a+" : "w+");
-         ed->bStartFromCpt = Flags & MD_STARTFROMCPT;
 +        fprintf(stderr,"ED sampling will be performed!\n");
-         /* Set the pointer to the next ED dataset */
++        snew(ed->edpar,1);
++
++        /* Read the edi input file: */
++        nED = read_edi_file(ftp2fn(efEDI,nfile,fnm),ed->edpar,natoms);
++
++        /* Make sure the checkpoint was produced in a run using this .edi file */
++        if (EDstate->bFromCpt)
++        {
++            crosscheck_edi_file_vs_checkpoint(ed, EDstate);
++        }
++        else 
++        {
++            EDstate->nED = nED;
++        }
++        init_edsamstate(ed, EDstate);
++
++        /* The master opens the ED output file */
++        if (Flags & MD_APPENDFILES)
++        {
++            ed->edo = gmx_fio_fopen(opt2fn("-eo",nfile,fnm),"a+");
++        }
++        else
++        {
++            ed->edo = xvgropen(opt2fn("-eo",nfile,fnm), 
++                    "Essential dynamics / flooding output", 
++                    "Time (ps)", 
++                    "RMSDs (nm), projections on EVs (nm), ...", oenv);
++
++            /* Make a descriptive legend */
++            write_edo_legend(ed, EDstate->nED, oenv);
++        }
 +    }
 +    return ed;
 +}
 +
 +
 +/* Broadcasts the structure data */
 +static void bc_ed_positions(t_commrec *cr, struct gmx_edx *s, int stype)
 +{
 +    snew_bc(cr, s->anrs, s->nr   );    /* Index numbers     */
 +    snew_bc(cr, s->x   , s->nr   );    /* Positions         */
 +    nblock_bc(cr, s->nr, s->anrs );
 +    nblock_bc(cr, s->nr, s->x    );
 +
 +    /* For the average & reference structures we need an array for the collective indices,
 +     * and we need to broadcast the masses as well */
 +    if (stype == eedAV || stype == eedREF)
 +    {
 +        /* We need these additional variables in the parallel case: */
 +        snew(s->c_ind    , s->nr   );   /* Collective indices */
 +        /* Local atom indices get assigned in dd_make_local_group_indices.
 +         * There, also memory is allocated */
 +        s->nalloc_loc = 0;              /* allocation size of s->anrs_loc */
 +        snew_bc(cr, s->x_old, s->nr);   /* To be able to always make the ED molecule whole, ...        */
 +        nblock_bc(cr, s->nr, s->x_old); /* ... keep track of shift changes with the help of old coords */
 +    }
 +
 +    /* broadcast masses for the reference structure (for mass-weighted fitting) */
 +    if (stype == eedREF)
 +    {
 +        snew_bc(cr, s->m, s->nr);
 +        nblock_bc(cr, s->nr, s->m);
 +    }
 +
 +    /* For the average structure we might need the masses for mass-weighting */
 +    if (stype == eedAV)
 +    {
 +        snew_bc(cr, s->sqrtm, s->nr);
 +        nblock_bc(cr, s->nr, s->sqrtm);
 +        snew_bc(cr, s->m, s->nr);
 +        nblock_bc(cr, s->nr, s->m);
 +    }
 +}
 +
 +
 +/* Broadcasts the eigenvector data */
 +static void bc_ed_vecs(t_commrec *cr, t_eigvec *ev, int length, gmx_bool bHarmonic)
 +{
 +    int i;
 +
 +    snew_bc(cr, ev->ieig   , ev->neig);  /* index numbers of eigenvector  */
 +    snew_bc(cr, ev->stpsz  , ev->neig);  /* stepsizes per eigenvector     */
 +    snew_bc(cr, ev->xproj  , ev->neig);  /* instantaneous x projection    */
 +    snew_bc(cr, ev->fproj  , ev->neig);  /* instantaneous f projection    */
 +    snew_bc(cr, ev->refproj, ev->neig);  /* starting or target projection */
 +
 +    nblock_bc(cr, ev->neig, ev->ieig   );
 +    nblock_bc(cr, ev->neig, ev->stpsz  );
 +    nblock_bc(cr, ev->neig, ev->xproj  );
 +    nblock_bc(cr, ev->neig, ev->fproj  );
 +    nblock_bc(cr, ev->neig, ev->refproj);
 +
 +    snew_bc(cr, ev->vec, ev->neig);      /* Eigenvector components        */
 +    for (i=0; i<ev->neig; i++)
 +    {
 +        snew_bc(cr, ev->vec[i], length);
 +        nblock_bc(cr, length, ev->vec[i]);
 +    }
 +
 +    /* For harmonic restraints the reference projections can change with time */
 +    if (bHarmonic)
 +    {
 +        snew_bc(cr, ev->refproj0    , ev->neig);
 +        snew_bc(cr, ev->refprojslope, ev->neig);
 +        nblock_bc(cr, ev->neig, ev->refproj0    );
 +        nblock_bc(cr, ev->neig, ev->refprojslope);
 +    }
 +}
 +
 +
 +/* Broadcasts the ED / flooding data to other nodes
 + * and allocates memory where needed */
 +static void broadcast_ed_data(t_commrec *cr, gmx_edsam_t ed, int numedis)
 +{
 +    int     nr;
 +    t_edpar *edi;
 +
 +
 +    /* Master lets the other nodes know if its ED only or also flooding */
 +    gmx_bcast(sizeof(ed->eEDtype), &(ed->eEDtype), cr);
 +
 +    snew_bc(cr, ed->edpar,1);
 +    /* Now transfer the ED data set(s) */
 +    edi = ed->edpar;
 +    for (nr=0; nr<numedis; nr++)
 +    {
 +        /* Broadcast a single ED data set */
 +        block_bc(cr, *edi);
 +
 +        /* Broadcast positions */
 +        bc_ed_positions(cr, &(edi->sref), eedREF); /* reference positions (don't broadcast masses)    */
 +        bc_ed_positions(cr, &(edi->sav ), eedAV ); /* average positions (do broadcast masses as well) */
 +        bc_ed_positions(cr, &(edi->star), eedTAR); /* target positions                                */
 +        bc_ed_positions(cr, &(edi->sori), eedORI); /* origin positions                                */
 +
 +        /* Broadcast eigenvectors */
 +        bc_ed_vecs(cr, &edi->vecs.mon   , edi->sav.nr, FALSE);
 +        bc_ed_vecs(cr, &edi->vecs.linfix, edi->sav.nr, FALSE);
 +        bc_ed_vecs(cr, &edi->vecs.linacc, edi->sav.nr, FALSE);
 +        bc_ed_vecs(cr, &edi->vecs.radfix, edi->sav.nr, FALSE);
 +        bc_ed_vecs(cr, &edi->vecs.radacc, edi->sav.nr, FALSE);
 +        bc_ed_vecs(cr, &edi->vecs.radcon, edi->sav.nr, FALSE);
 +        /* Broadcast flooding eigenvectors and, if needed, values for the moving reference */
 +        bc_ed_vecs(cr, &edi->flood.vecs,  edi->sav.nr, edi->flood.bHarmonic);
 +
- static void init_edi(gmx_mtop_t *mtop,t_inputrec *ir,
-                      t_commrec *cr,gmx_edsam_t ed,t_edpar *edi)
++        /* Set the pointer to the next ED group */
 +        if (edi->next_edi)
 +        {
 +          snew_bc(cr, edi->next_edi, 1);
 +          edi = edi->next_edi;
 +        }
 +    }
 +}
 +
 +
 +/* init-routine called for every *.edi-cycle, initialises t_edpar structure */
- static int read_edi(FILE* in, gmx_edsam_t ed,t_edpar *edi,int nr_mdatoms, int edi_nr, t_commrec *cr)
++static void init_edi(gmx_mtop_t *mtop,t_edpar *edi)
 +{
 +    int  i;
 +    real totalmass = 0.0;
 +    rvec com;
 +    gmx_mtop_atomlookup_t alook=NULL;
 +    t_atom *atom;
 +
 +    /* NOTE Init_edi is executed on the master process only
 +     * The initialized data sets are then transmitted to the
 +     * other nodes in broadcast_ed_data */
 +
 +    edi->bNeedDoEdsam = edi->vecs.mon.neig
 +                     || edi->vecs.linfix.neig
 +                     || edi->vecs.linacc.neig
 +                     || edi->vecs.radfix.neig
 +                     || edi->vecs.radacc.neig
 +                     || edi->vecs.radcon.neig;
 +
 +    alook = gmx_mtop_atomlookup_init(mtop);
 +
 +    /* evaluate masses (reference structure) */
 +    snew(edi->sref.m, edi->sref.nr);
 +    for (i = 0; i < edi->sref.nr; i++)
 +    {
 +        if (edi->fitmas)
 +        {
 +            gmx_mtop_atomnr_to_atom(alook,edi->sref.anrs[i],&atom);
 +            edi->sref.m[i] = atom->m;
 +        }
 +        else
 +        {
 +            edi->sref.m[i] = 1.0;
 +        }
 +
 +        /* Check that every m > 0. Bad things will happen otherwise. */
 +        if (edi->sref.m[i] <= 0.0)
 +        {
 +            gmx_fatal(FARGS, "Reference structure atom %d (sam.edi index %d) has a mass of %g.\n"
 +                             "For a mass-weighted fit, all reference structure atoms need to have a mass >0.\n"
 +                             "Either make the covariance analysis non-mass-weighted, or exclude massless\n"
 +                             "atoms from the reference structure by creating a proper index group.\n",
 +                      i, edi->sref.anrs[i]+1, edi->sref.m[i]);
 +        }
 +
 +        totalmass += edi->sref.m[i];
 +    }
 +    edi->sref.mtot = totalmass;
 +
 +    /* Masses m and sqrt(m) for the average structure. Note that m
 +     * is needed if forces have to be evaluated in do_edsam */
 +    snew(edi->sav.sqrtm, edi->sav.nr );
 +    snew(edi->sav.m    , edi->sav.nr );
 +    for (i = 0; i < edi->sav.nr; i++)
 +    {
 +        gmx_mtop_atomnr_to_atom(alook,edi->sav.anrs[i],&atom);
 +        edi->sav.m[i] = atom->m;
 +        if (edi->pcamas)
 +        {
 +            edi->sav.sqrtm[i] = sqrt(atom->m);
 +        }
 +        else
 +        {
 +            edi->sav.sqrtm[i] = 1.0;
 +        }
 +
 +        /* Check that every m > 0. Bad things will happen otherwise. */
 +        if (edi->sav.sqrtm[i] <= 0.0)
 +        {
 +            gmx_fatal(FARGS, "Average structure atom %d (sam.edi index %d) has a mass of %g.\n"
 +                             "For ED with mass-weighting, all average structure atoms need to have a mass >0.\n"
 +                             "Either make the covariance analysis non-mass-weighted, or exclude massless\n"
 +                             "atoms from the average structure by creating a proper index group.\n",
 +                      i, edi->sav.anrs[i]+1, atom->m);
 +        }
 +    }
 +
 +    gmx_mtop_atomlookup_destroy(alook);
 +
 +    /* put reference structure in origin */
 +    get_center(edi->sref.x, edi->sref.m, edi->sref.nr, com);
 +    com[XX] = -com[XX];
 +    com[YY] = -com[YY];
 +    com[ZZ] = -com[ZZ];
 +    translate_x(edi->sref.x, edi->sref.nr, com);
 +
 +    /* Init ED buffer */
 +    snew(edi->buf, 1);
 +}
 +
 +
 +static void check(const char *line, const char *label)
 +{
 +    if (!strstr(line,label))
++    {
 +        gmx_fatal(FARGS,"Could not find input parameter %s at expected position in edsam input-file (.edi)\nline read instead is %s",label,line);
++    }
 +}
 +
 +
 +static int read_checked_edint(FILE *file,const char *label)
 +{
 +    char line[STRLEN+1];
 +    int idum;
 +
 +
 +    fgets2 (line,STRLEN,file);
 +    check(line,label);
 +    fgets2 (line,STRLEN,file);
 +    sscanf (line,"%d",&idum);
 +    return idum;
 +}
 +
 +
 +static int read_edint(FILE *file,gmx_bool *bEOF)
 +{
 +    char line[STRLEN+1];
 +    int idum;
 +    char *eof;
 +
 +
 +    eof=fgets2 (line,STRLEN,file);
 +    if (eof==NULL)
 +    {
 +        *bEOF = TRUE;
 +        return -1;
 +    }
 +    eof=fgets2 (line,STRLEN,file);
 +    if (eof==NULL)
 +    {
 +        *bEOF = TRUE;
 +        return -1;
 +    }
 +    sscanf (line,"%d",&idum);
 +    *bEOF = FALSE;
 +    return idum;
 +}
 +
 +
 +static real read_checked_edreal(FILE *file,const char *label)
 +{
 +    char line[STRLEN+1];
 +    double rdum;
 +
 +
 +    fgets2 (line,STRLEN,file);
 +    check(line,label);
 +    fgets2 (line,STRLEN,file);
 +    sscanf (line,"%lf",&rdum);
 +    return (real) rdum; /* always read as double and convert to single */
 +}
 +
 +
 +static void read_edx(FILE *file,int number,int *anrs,rvec *x)
 +{
 +    int i,j;
 +    char line[STRLEN+1];
 +    double d[3];
 +
 +
 +    for(i=0; i<number; i++)
 +    {
 +        fgets2 (line,STRLEN,file);
 +        sscanf (line,"%d%lf%lf%lf",&anrs[i],&d[0],&d[1],&d[2]);
 +        anrs[i]--; /* we are reading FORTRAN indices */
 +        for(j=0; j<3; j++)
++        {
 +            x[i][j]=d[j]; /* always read as double and convert to single */
++        }
 +    }
 +}
 +
 +
 +static void scan_edvec(FILE *in,int nr,rvec *vec)
 +{
 +    char line[STRLEN+1];
 +    int i;
 +    double x,y,z;
 +
 +
 +    for(i=0; (i < nr); i++)
 +    {
 +        fgets2 (line,STRLEN,in);
 +        sscanf (line,"%le%le%le",&x,&y,&z);
 +        vec[i][XX]=x;
 +        vec[i][YY]=y;
 +        vec[i][ZZ]=z;
 +    }
 +}
 +
 +
 +static void read_edvec(FILE *in,int nr,t_eigvec *tvec,gmx_bool bReadRefproj, gmx_bool *bHaveReference)
 +{
 +    int i,idum,nscan;
 +    double rdum,refproj_dum=0.0,refprojslope_dum=0.0;
 +    char line[STRLEN+1];
 +
 +
 +    tvec->neig=read_checked_edint(in,"NUMBER OF EIGENVECTORS");
 +    if (tvec->neig >0)
 +    {
 +        snew(tvec->ieig   ,tvec->neig);
 +        snew(tvec->stpsz  ,tvec->neig);
 +        snew(tvec->vec    ,tvec->neig);
 +        snew(tvec->xproj  ,tvec->neig);
 +        snew(tvec->fproj  ,tvec->neig);
 +        snew(tvec->refproj,tvec->neig);
 +        if (bReadRefproj)
 +        {
 +            snew(tvec->refproj0    ,tvec->neig);
 +            snew(tvec->refprojslope,tvec->neig);
 +        }
 +
 +        for(i=0; (i < tvec->neig); i++)
 +        {
 +            fgets2 (line,STRLEN,in);
 +            if (bReadRefproj) /* ONLY when using flooding as harmonic restraint */
 +            {
 +                nscan = sscanf(line,"%d%lf%lf%lf",&idum,&rdum,&refproj_dum,&refprojslope_dum);
 +                /* Zero out values which were not scanned */
 +                switch(nscan)
 +                {
 +                    case 4:
 +                        /* Every 4 values read, including reference position */
 +                        *bHaveReference = TRUE;
 +                        break;
 +                    case 3:
 +                        /* A reference position is provided */
 +                        *bHaveReference = TRUE;
 +                        /* No value for slope, set to 0 */
 +                        refprojslope_dum = 0.0;
 +                        break;
 +                    case 2:
 +                        /* No values for reference projection and slope, set to 0 */
 +                        refproj_dum      = 0.0;
 +                        refprojslope_dum = 0.0;
 +                        break;
 +                    default:
 +                        gmx_fatal(FARGS,"Expected 2 - 4 (not %d) values for flooding vec: <nr> <spring const> <refproj> <refproj-slope>\n", nscan);
 +                        break;
 +                }
 +                tvec->refproj[i]=refproj_dum;
 +                tvec->refproj0[i]=refproj_dum;
 +                tvec->refprojslope[i]=refprojslope_dum;
 +            }
 +            else /* Normal flooding */
 +            {
 +                nscan = sscanf(line,"%d%lf",&idum,&rdum);
 +                if (nscan != 2)
++                {
 +                    gmx_fatal(FARGS,"Expected 2 values for flooding vec: <nr> <stpsz>\n");
++                }
 +            }
 +            tvec->ieig[i]=idum;
 +            tvec->stpsz[i]=rdum;
 +        } /* end of loop over eigenvectors */
 +
 +        for(i=0; (i < tvec->neig); i++)
 +        {
 +            snew(tvec->vec[i],nr);
 +            scan_edvec(in,nr,tvec->vec[i]);
 +        }
 +    }
 +}
 +
 +
 +/* calls read_edvec for the vector groups, only for flooding there is an extra call */
 +static void read_edvecs(FILE *in,int nr,t_edvecs *vecs)
 +{
 +      gmx_bool bHaveReference = FALSE;
 +
 +
 +    read_edvec(in, nr, &vecs->mon   , FALSE, &bHaveReference);
 +    read_edvec(in, nr, &vecs->linfix, FALSE, &bHaveReference);
 +    read_edvec(in, nr, &vecs->linacc, FALSE, &bHaveReference);
 +    read_edvec(in, nr, &vecs->radfix, FALSE, &bHaveReference);
 +    read_edvec(in, nr, &vecs->radacc, FALSE, &bHaveReference);
 +    read_edvec(in, nr, &vecs->radcon, FALSE, &bHaveReference);
 +}
 +
 +
 +/* Check if the same atom indices are used for reference and average positions */
 +static gmx_bool check_if_same(struct gmx_edx sref, struct gmx_edx sav)
 +{
 +    int i;
 +
 +
 +    /* If the number of atoms differs between the two structures,
 +     * they cannot be identical */
 +    if (sref.nr != sav.nr)
++    {
 +        return FALSE;
++    }
 +
 +    /* Now that we know that both structures have the same number of atoms,
 +     * check if also the indices are identical */
 +    for (i=0; i < sav.nr; i++)
 +    {
 +        if (sref.anrs[i] != sav.anrs[i])
++        {
 +            return FALSE;
++        }
 +    }
 +    fprintf(stderr, "ED: Note: Reference and average structure are composed of the same atom indices.\n");
 +
 +    return TRUE;
 +}
 +
 +
-             gmx_fatal(FARGS,"Wrong magic number %d in %s",readmagic,ed->edinam);
++static int read_edi(FILE* in,t_edpar *edi,int nr_mdatoms, const char *fn)
 +{
 +    int readmagic;
 +    const int magic=670;
 +    gmx_bool bEOF;
 +
 +    /* Was a specific reference point for the flooding/umbrella potential provided in the edi file? */
 +    gmx_bool bHaveReference = FALSE;
 +
 +
 +    /* the edi file is not free format, so expect problems if the input is corrupt. */
 +
 +    /* check the magic number */
 +    readmagic=read_edint(in,&bEOF);
 +    /* Check whether we have reached the end of the input file */
 +    if (bEOF)
++    {
 +        return 0;
++    }
 +
 +    if (readmagic != magic)
 +    {
 +        if (readmagic==666 || readmagic==667 || readmagic==668)
++        {
 +            gmx_fatal(FARGS,"Wrong magic number: Use newest version of make_edi to produce edi file");
++        }
 +        else if (readmagic != 669)
-         gmx_fatal(FARGS,"Nr of atoms in %s (%d) does not match nr of md atoms (%d)",
-                 ed->edinam,edi->nini,nr_mdatoms);
++        {
++            gmx_fatal(FARGS,"Wrong magic number %d in %s",readmagic,fn);
++        }
 +    }
 +
 +    /* check the number of atoms */
 +    edi->nini=read_edint(in,&bEOF);
 +    if (edi->nini != nr_mdatoms)
-     edi->flood.flood_id  = edi_nr;
++    {
++        gmx_fatal(FARGS,"Nr of atoms in %s (%d) does not match nr of md atoms (%d)", fn,edi->nini,nr_mdatoms);
++    }
 +
 +    /* Done checking. For the rest we blindly trust the input */
 +    edi->fitmas          = read_checked_edint(in,"FITMAS");
 +    edi->pcamas          = read_checked_edint(in,"ANALYSIS_MAS");
 +    edi->outfrq          = read_checked_edint(in,"OUTFRQ");
 +    edi->maxedsteps      = read_checked_edint(in,"MAXLEN");
 +    edi->slope           = read_checked_edreal(in,"SLOPECRIT");
 +
 +    edi->presteps        = read_checked_edint(in,"PRESTEPS");
 +    edi->flood.deltaF0   = read_checked_edreal(in,"DELTA_F0");
 +    edi->flood.deltaF    = read_checked_edreal(in,"INIT_DELTA_F");
 +    edi->flood.tau       = read_checked_edreal(in,"TAU");
 +    edi->flood.constEfl  = read_checked_edreal(in,"EFL_NULL");
 +    edi->flood.alpha2    = read_checked_edreal(in,"ALPHA2");
 +    edi->flood.kT        = read_checked_edreal(in,"KT");
 +    edi->flood.bHarmonic = read_checked_edint(in,"HARMONIC");
 +    if (readmagic > 669)
++    {
 +        edi->flood.bConstForce = read_checked_edint(in,"CONST_FORCE_FLOODING");
++    }
 +    else
++    {
 +        edi->flood.bConstForce = FALSE;
-       if (bHaveReference)
-       {
-               /* Both an -ori structure and a at least one manual reference point have been
-                * specified. That's ambiguous and probably not intentional. */
-               gmx_fatal(FARGS, "ED: An origin structure has been provided and a at least one (moving) reference\n"
-                                "    point was manually specified in the edi file. That is ambiguous. Aborting.\n");
-       }
++    }
 +    edi->sref.nr         = read_checked_edint(in,"NREF");
 +
 +    /* allocate space for reference positions and read them */
 +    snew(edi->sref.anrs,edi->sref.nr);
 +    snew(edi->sref.x   ,edi->sref.nr);
 +    snew(edi->sref.x_old,edi->sref.nr);
 +    edi->sref.sqrtm    =NULL;
 +    read_edx(in,edi->sref.nr,edi->sref.anrs,edi->sref.x);
 +
 +    /* average positions. they define which atoms will be used for ED sampling */
 +    edi->sav.nr=read_checked_edint(in,"NAV");
 +    snew(edi->sav.anrs,edi->sav.nr);
 +    snew(edi->sav.x   ,edi->sav.nr);
 +    snew(edi->sav.x_old,edi->sav.nr);
 +    read_edx(in,edi->sav.nr,edi->sav.anrs,edi->sav.x);
 +
 +    /* Check if the same atom indices are used for reference and average positions */
 +    edi->bRefEqAv = check_if_same(edi->sref, edi->sav);
 +
 +    /* eigenvectors */
 +    read_edvecs(in,edi->sav.nr,&edi->vecs);
 +    read_edvec(in,edi->sav.nr,&edi->flood.vecs,edi->flood.bHarmonic, &bHaveReference);
 +
 +    /* target positions */
 +    edi->star.nr=read_edint(in,&bEOF);
 +    if (edi->star.nr > 0)
 +    {
 +        snew(edi->star.anrs,edi->star.nr);
 +        snew(edi->star.x   ,edi->star.nr);
 +        edi->star.sqrtm    =NULL;
 +        read_edx(in,edi->star.nr,edi->star.anrs,edi->star.x);
 +    }
 +
 +    /* positions defining origin of expansion circle */
 +    edi->sori.nr=read_edint(in,&bEOF);
 +    if (edi->sori.nr > 0)
 +    {
- static void read_edi_file(gmx_edsam_t ed, t_edpar *edi, int nr_mdatoms, t_commrec *cr)
++        if (bHaveReference)
++        {
++            /* Both an -ori structure and a at least one manual reference point have been
++             * specified. That's ambiguous and probably not intentional. */
++            gmx_fatal(FARGS, "ED: An origin structure has been provided and a at least one (moving) reference\n"
++                             "    point was manually specified in the edi file. That is ambiguous. Aborting.\n");
++        }
 +        snew(edi->sori.anrs,edi->sori.nr);
 +        snew(edi->sori.x   ,edi->sori.nr);
 +        edi->sori.sqrtm    =NULL;
 +        read_edx(in,edi->sori.nr,edi->sori.anrs,edi->sori.x);
 +    }
 +
 +    /* all done */
 +    return 1;
 +}
 +
 +
 +
 +/* Read in the edi input file. Note that it may contain several ED data sets which were
 + * achieved by concatenating multiple edi files. The standard case would be a single ED
 + * data set, though. */
-     in = gmx_fio_fopen(ed->edinam,"r");
-     fprintf(stderr, "ED: Reading edi file %s\n", ed->edinam);
++static int read_edi_file(const char *fn, t_edpar *edi, int nr_mdatoms)
 +{
 +    FILE    *in;
 +    t_edpar *curr_edi,*last_edi;
 +    t_edpar *edi_read;
 +    int     edi_nr = 0;
 +
 +
 +    /* This routine is executed on the master only */
 +
 +    /* Open the .edi parameter input file */
-     while( read_edi(in, ed, curr_edi, nr_mdatoms, edi_nr, cr) )
++    in = gmx_fio_fopen(fn,"r");
++    fprintf(stderr, "ED: Reading edi file %s\n", fn);
 +
 +    /* Now read a sequence of ED input parameter sets from the edi file */
 +    curr_edi=edi;
 +    last_edi=edi;
-         /* Make shure that the number of atoms in each dataset is the same as in the tpr file */
-         if (edi->nini != nr_mdatoms)
-             gmx_fatal(FARGS,"edi file %s (dataset #%d) was made for %d atoms, but the simulation contains %d atoms.",
-                     ed->edinam, edi_nr, edi->nini, nr_mdatoms);
++    while( read_edi(in, curr_edi, nr_mdatoms, fn) )
 +    {
 +        edi_nr++;
-         /* Keep the curr_edi pointer for the case that the next dataset is empty: */
++
 +        /* Since we arrived within this while loop we know that there is still another data set to be read in */
 +        /* We need to allocate space for the data: */
 +        snew(edi_read,1);
 +        /* Point the 'next_edi' entry to the next edi: */
 +        curr_edi->next_edi=edi_read;
-         gmx_fatal(FARGS, "No complete ED data set found in edi file %s.", ed->edinam);
++        /* Keep the curr_edi pointer for the case that the next group is empty: */
 +        last_edi = curr_edi;
 +        /* Let's prepare to read in the next edi data set: */
 +        curr_edi = edi_read;
 +    }
 +    if (edi_nr == 0)
-     /* Terminate the edi dataset list with a NULL pointer: */
++    {
++        gmx_fatal(FARGS, "No complete ED data set found in edi file %s.", fn);
++    }
 +
-     fprintf(stderr, "ED: Found %d ED dataset%s.\n", edi_nr, edi_nr>1? "s" : "");
++    /* Terminate the edi group list with a NULL pointer: */
 +    last_edi->next_edi = NULL;
 +
-     /* Allocate memory the first time this routine is called for each edi dataset */
++    fprintf(stderr, "ED: Found %d ED group%s.\n", edi_nr, edi_nr>1? "s" : "");
 +
 +    /* Close the .edi file again */
 +    gmx_fio_fclose(in);
++
++    return edi_nr;
 +}
 +
 +
 +struct t_fit_to_ref {
 +    rvec *xcopy;       /* Working copy of the positions in fit_to_reference */
 +};
 +
 +/* Fit the current positions to the reference positions
 + * Do not actually do the fit, just return rotation and translation.
 + * Note that the COM of the reference structure was already put into
 + * the origin by init_edi. */
 +static void fit_to_reference(rvec      *xcoll,    /* The positions to be fitted */
 +                             rvec      transvec,  /* The translation vector */
 +                             matrix    rotmat,    /* The rotation matrix */
 +                             t_edpar   *edi)      /* Just needed for do_edfit */
 +{
 +    rvec com;          /* center of mass */
 +    int  i;
 +    struct t_fit_to_ref *loc;
 +
 +
-         /* Loop over ED datasets (usually there is just one dataset, though) */
++    /* Allocate memory the first time this routine is called for each edi group */
 +    if (NULL == edi->buf->fit_to_ref)
 +    {
 +        snew(edi->buf->fit_to_ref, 1);
 +        snew(edi->buf->fit_to_ref->xcopy, edi->sref.nr);
 +    }
 +    loc = edi->buf->fit_to_ref;
 +
 +    /* We do not touch the original positions but work on a copy. */
 +    for (i=0; i<edi->sref.nr; i++)
++    {
 +        copy_rvec(xcoll[i], loc->xcopy[i]);
++    }
 +
 +    /* Calculate the center of mass */
 +    get_center(loc->xcopy, edi->sref.m, edi->sref.nr, com);
 +
 +    transvec[XX] = -com[XX];
 +    transvec[YY] = -com[YY];
 +    transvec[ZZ] = -com[ZZ];
 +
 +    /* Subtract the center of mass from the copy */
 +    translate_x(loc->xcopy, edi->sref.nr, transvec);
 +
 +    /* Determine the rotation matrix */
 +    do_edfit(edi->sref.nr, edi->sref.x, loc->xcopy, rotmat, edi);
 +}
 +
 +
 +static void translate_and_rotate(rvec *x,         /* The positions to be translated and rotated */
 +                                 int nat,         /* How many positions are there? */
 +                                 rvec transvec,   /* The translation vector */
 +                                 matrix rotmat)   /* The rotation matrix */
 +{
 +    /* Translation */
 +    translate_x(x, nat, transvec);
 +
 +    /* Rotation */
 +    rotate_x(x, nat, rotmat);
 +}
 +
 +
 +/* Gets the rms deviation of the positions to the structure s */
 +/* fit_to_structure has to be called before calling this routine! */
 +static real rmsd_from_structure(rvec           *x,  /* The positions under consideration */
 +                                struct gmx_edx *s)  /* The structure from which the rmsd shall be computed */
 +{
 +    real  rmsd=0.0;
 +    int   i;
 +
 +
 +    for (i=0; i < s->nr; i++)
++    {
 +        rmsd += distance2(s->x[i], x[i]);
++    }
 +
 +    rmsd /= (real) s->nr;
 +    rmsd = sqrt(rmsd);
 +
 +    return rmsd;
 +}
 +
 +
 +void dd_make_local_ed_indices(gmx_domdec_t *dd, struct gmx_edsam *ed)
 +{
 +    t_edpar *edi;
 +
 +
 +    if (ed->eEDtype != eEDnone)
 +    {
-             /* Set the pointer to the next ED dataset (if any) */
++        /* Loop over ED groups */
 +        edi=ed->edpar;
 +        while (edi)
 +        {
 +            /* Local atoms of the reference structure (for fitting), need only be assembled
 +             * if their indices differ from the average ones */
 +            if (!edi->bRefEqAv)
++            {
 +                dd_make_local_group_indices(dd->ga2la, edi->sref.nr, edi->sref.anrs,
 +                        &edi->sref.nr_loc, &edi->sref.anrs_loc, &edi->sref.nalloc_loc, edi->sref.c_ind);
++            }
 +
 +            /* Local atoms of the average structure (on these ED will be performed) */
 +            dd_make_local_group_indices(dd->ga2la, edi->sav.nr, edi->sav.anrs,
 +                    &edi->sav.nr_loc, &edi->sav.anrs_loc, &edi->sav.nalloc_loc, edi->sav.c_ind);
 +
 +            /* Indicate that the ED shift vectors for this structure need to be updated
 +             * at the next call to communicate_group_positions, since obviously we are in a NS step */
 +            edi->buf->do_edsam->bUpdateShifts = TRUE;
 +
-     } else
++            /* Set the pointer to the next ED group (if any) */
 +            edi=edi->next_edi;
 +        }
 +    }
 +}
 +
 +
 +static inline void ed_unshift_single_coord(matrix box, const rvec x, const ivec is, rvec xu)
 +{
 +    int tx,ty,tz;
 +
 +
 +    tx=is[XX];
 +    ty=is[YY];
 +    tz=is[ZZ];
 +
 +    if(TRICLINIC(box))
 +    {
 +        xu[XX] = x[XX]-tx*box[XX][XX]-ty*box[YY][XX]-tz*box[ZZ][XX];
 +        xu[YY] = x[YY]-ty*box[YY][YY]-tz*box[ZZ][YY];
 +        xu[ZZ] = x[ZZ]-tz*box[ZZ][ZZ];
- static void do_linfix(rvec *xcoll, t_edpar *edi, int step, t_commrec *cr)
++    }
++    else
 +    {
 +        xu[XX] = x[XX]-tx*box[XX][XX];
 +        xu[YY] = x[YY]-ty*box[YY][YY];
 +        xu[ZZ] = x[ZZ]-tz*box[ZZ][ZZ];
 +    }
 +}
 +
 +
- static void do_linacc(rvec *xcoll, t_edpar *edi, t_commrec *cr)
++static void do_linfix(rvec *xcoll, t_edpar *edi, gmx_large_int_t step)
 +{
 +    int  i, j;
 +    real proj, add;
 +    rvec vec_dum;
 +
 +
 +    /* loop over linfix vectors */
 +    for (i=0; i<edi->vecs.linfix.neig; i++)
 +    {
 +        /* calculate the projection */
 +        proj = projectx(edi, xcoll, edi->vecs.linfix.vec[i]);
 +
 +        /* calculate the correction */
 +        add = edi->vecs.linfix.refproj[i] + step*edi->vecs.linfix.stpsz[i] - proj;
 +
 +        /* apply the correction */
 +        add /= edi->sav.sqrtm[i];
 +        for (j=0; j<edi->sav.nr; j++)
 +        {
 +            svmul(add, edi->vecs.linfix.vec[i][j], vec_dum);
 +            rvec_inc(xcoll[j], vec_dum);
 +        }
 +    }
 +}
 +
 +
- static void do_radfix(rvec *xcoll, t_edpar *edi, int step, t_commrec *cr)
++static void do_linacc(rvec *xcoll, t_edpar *edi)
 +{
 +    int  i, j;
 +    real proj, add;
 +    rvec vec_dum;
 +
 +
 +    /* loop over linacc vectors */
 +    for (i=0; i<edi->vecs.linacc.neig; i++)
 +    {
 +        /* calculate the projection */
 +        proj=projectx(edi, xcoll, edi->vecs.linacc.vec[i]);
 +
 +        /* calculate the correction */
 +        add = 0.0;
 +        if (edi->vecs.linacc.stpsz[i] > 0.0)
 +        {
 +            if ((proj-edi->vecs.linacc.refproj[i]) < 0.0)
++            {
 +                add = edi->vecs.linacc.refproj[i] - proj;
++            }
 +        }
 +        if (edi->vecs.linacc.stpsz[i] < 0.0)
 +        {
 +            if ((proj-edi->vecs.linacc.refproj[i]) > 0.0)
++            {
 +                add = edi->vecs.linacc.refproj[i] - proj;
++            }
 +        }
 +
 +        /* apply the correction */
 +        add /= edi->sav.sqrtm[i];
 +        for (j=0; j<edi->sav.nr; j++)
 +        {
 +            svmul(add, edi->vecs.linacc.vec[i][j], vec_dum);
 +            rvec_inc(xcoll[j], vec_dum);
 +        }
 +
 +        /* new positions will act as reference */
 +        edi->vecs.linacc.refproj[i] = proj + add;
 +    }
 +}
 +
 +
-         for (j=0; j<edi->sav.nr; j++) {
++static void do_radfix(rvec *xcoll, t_edpar *edi)
 +{
 +    int  i,j;
 +    real *proj, rad=0.0, ratio;
 +    rvec vec_dum;
 +
 +
 +    if (edi->vecs.radfix.neig == 0)
 +        return;
 +
 +    snew(proj, edi->vecs.radfix.neig);
 +
 +    /* loop over radfix vectors */
 +    for (i=0; i<edi->vecs.radfix.neig; i++)
 +    {
 +        /* calculate the projections, radius */
 +        proj[i] = projectx(edi, xcoll, edi->vecs.radfix.vec[i]);
 +        rad += pow(proj[i] - edi->vecs.radfix.refproj[i], 2);
 +    }
 +
 +    rad   = sqrt(rad);
 +    ratio = (edi->vecs.radfix.stpsz[0]+edi->vecs.radfix.radius)/rad - 1.0;
 +    edi->vecs.radfix.radius += edi->vecs.radfix.stpsz[0];
 +
 +    /* loop over radfix vectors */
 +    for (i=0; i<edi->vecs.radfix.neig; i++)
 +    {
 +        proj[i] -= edi->vecs.radfix.refproj[i];
 +
 +        /* apply the correction */
 +        proj[i] /= edi->sav.sqrtm[i];
 +        proj[i] *= ratio;
- static void do_radacc(rvec *xcoll, t_edpar *edi, t_commrec *cr)
++        for (j=0; j<edi->sav.nr; j++)
++        {
 +            svmul(proj[i], edi->vecs.radfix.vec[i][j], vec_dum);
 +            rvec_inc(xcoll[j], vec_dum);
 +        }
 +    }
 +
 +    sfree(proj);
 +}
 +
 +
- static void do_radcon(rvec *xcoll, t_edpar *edi, t_commrec *cr)
++static void do_radacc(rvec *xcoll, t_edpar *edi)
 +{
 +    int  i,j;
 +    real *proj, rad=0.0, ratio=0.0;
 +    rvec vec_dum;
 +
 +
 +    if (edi->vecs.radacc.neig == 0)
 +        return;
 +
 +    snew(proj,edi->vecs.radacc.neig);
 +
 +    /* loop over radacc vectors */
 +    for (i=0; i<edi->vecs.radacc.neig; i++)
 +    {
 +        /* calculate the projections, radius */
 +        proj[i] = projectx(edi, xcoll, edi->vecs.radacc.vec[i]);
 +        rad += pow(proj[i] - edi->vecs.radacc.refproj[i], 2);
 +    }
 +    rad = sqrt(rad);
 +
 +    /* only correct when radius decreased */
 +    if (rad < edi->vecs.radacc.radius)
 +    {
 +        ratio = edi->vecs.radacc.radius/rad - 1.0;
 +        rad   = edi->vecs.radacc.radius;
 +    }
 +    else
 +        edi->vecs.radacc.radius = rad;
 +
 +    /* loop over radacc vectors */
 +    for (i=0; i<edi->vecs.radacc.neig; i++)
 +    {
 +        proj[i] -= edi->vecs.radacc.refproj[i];
 +
 +        /* apply the correction */
 +        proj[i] /= edi->sav.sqrtm[i];
 +        proj[i] *= ratio;
 +        for (j=0; j<edi->sav.nr; j++)
 +        {
 +            svmul(proj[i], edi->vecs.radacc.vec[i][j], vec_dum);
 +            rvec_inc(xcoll[j], vec_dum);
 +        }
 +    }
 +    sfree(proj);
 +}
 +
 +
 +struct t_do_radcon {
 +    real *proj;
 +};
 +
++static void do_radcon(rvec *xcoll, t_edpar *edi)
 +{
 +    int  i,j;
 +    real rad=0.0, ratio=0.0;
 +    struct t_do_radcon *loc;
 +    gmx_bool bFirst;
 +    rvec vec_dum;
 +
 +
 +    if(edi->buf->do_radcon != NULL)
 +    {
 +        bFirst = FALSE;
 +        loc    = edi->buf->do_radcon;
 +    }
 +    else
 +    {
 +        bFirst = TRUE;
 +        snew(edi->buf->do_radcon, 1);
 +    }
 +    loc = edi->buf->do_radcon;
 +
 +    if (edi->vecs.radcon.neig == 0)
++    {
 +        return;
- static void ed_apply_constraints(rvec *xcoll, t_edpar *edi, gmx_large_int_t step, t_commrec *cr)
++    }
++    
 +    if (bFirst)
++    {
 +        snew(loc->proj, edi->vecs.radcon.neig);
++    }
 +
 +    /* loop over radcon vectors */
 +    for (i=0; i<edi->vecs.radcon.neig; i++)
 +    {
 +        /* calculate the projections, radius */
 +        loc->proj[i] = projectx(edi, xcoll, edi->vecs.radcon.vec[i]);
 +        rad += pow(loc->proj[i] - edi->vecs.radcon.refproj[i], 2);
 +    }
 +    rad = sqrt(rad);
 +    /* only correct when radius increased */
 +    if (rad > edi->vecs.radcon.radius)
 +    {
 +        ratio = edi->vecs.radcon.radius/rad - 1.0;
 +
 +        /* loop over radcon vectors */
 +        for (i=0; i<edi->vecs.radcon.neig; i++)
 +        {
 +            /* apply the correction */
 +            loc->proj[i] -= edi->vecs.radcon.refproj[i];
 +            loc->proj[i] /= edi->sav.sqrtm[i];
 +            loc->proj[i] *= ratio;
 +
 +            for (j=0; j<edi->sav.nr; j++)
 +            {
 +                svmul(loc->proj[i], edi->vecs.radcon.vec[i][j], vec_dum);
 +                rvec_inc(xcoll[j], vec_dum);
 +            }
 +        }
 +    }
 +    else
 +        edi->vecs.radcon.radius = rad;
 +
 +    if (rad != edi->vecs.radcon.radius)
 +    {
 +        rad = 0.0;
 +        for (i=0; i<edi->vecs.radcon.neig; i++)
 +        {
 +            /* calculate the projections, radius */
 +            loc->proj[i] = projectx(edi, xcoll, edi->vecs.radcon.vec[i]);
 +            rad += pow(loc->proj[i] - edi->vecs.radcon.refproj[i], 2);
 +        }
 +        rad = sqrt(rad);
 +    }
 +}
 +
 +
-         do_linfix(xcoll, edi, step, cr);
-     do_linacc(xcoll, edi, cr);
++static void ed_apply_constraints(rvec *xcoll, t_edpar *edi, gmx_large_int_t step)
 +{
 +    int i;
 +
 +
 +    /* subtract the average positions */
 +    for (i=0; i<edi->sav.nr; i++)
++    {
 +        rvec_dec(xcoll[i], edi->sav.x[i]);
++    }
 +
 +    /* apply the constraints */
 +    if (step >= 0)
-         do_radfix(xcoll, edi, step, cr);
-     do_radacc(xcoll, edi, cr);
-     do_radcon(xcoll, edi, cr);
++    {
++        do_linfix(xcoll, edi, step);
++    }
++    do_linacc(xcoll, edi);
 +    if (step >= 0)
- /* Write out the projections onto the eigenvectors */
- static void write_edo(int nr_edi, t_edpar *edi, gmx_edsam_t ed, gmx_large_int_t step,real rmsd)
++    {
++        do_radfix(xcoll, edi);
++    }
++    do_radacc(xcoll, edi);
++    do_radcon(xcoll, edi);
 +
 +    /* add back the average positions */
 +    for (i=0; i<edi->sav.nr; i++)
++    {
 +        rvec_inc(xcoll[i], edi->sav.x[i]);
++    }
 +}
 +
 +
-     char buf[22];
++/* Write out the projections onto the eigenvectors. The order of output
++ * corresponds to ed_output_legend() */
++static void write_edo(t_edpar *edi, FILE *fp,real rmsd)
 +{
 +    int i;
-     if (edi->bNeedDoEdsam)
 +
 +
-         if (step == -1)
-             fprintf(ed->edo, "Initial projections:\n");
-         else
-         {
-             fprintf(ed->edo,"Step %s, ED #%d  ", gmx_step_str(step, buf), nr_edi);
-             fprintf(ed->edo,"  RMSD %f nm\n",rmsd);
-         }
++    /* Output how well we fit to the reference structure */
++    fprintf(fp, EDcol_ffmt, rmsd);
++
++    for (i=0; i<edi->vecs.mon.neig; i++)
 +    {
-         if (edi->vecs.mon.neig)
-         {
-             fprintf(ed->edo,"  Monitor eigenvectors");
-             for (i=0; i<edi->vecs.mon.neig; i++)
-                 fprintf(ed->edo," %d: %12.5e ",edi->vecs.mon.ieig[i],edi->vecs.mon.xproj[i]);
-             fprintf(ed->edo,"\n");
-         }
-         if (edi->vecs.linfix.neig)
-         {
-             fprintf(ed->edo,"  Linfix  eigenvectors");
-             for (i=0; i<edi->vecs.linfix.neig; i++)
-                 fprintf(ed->edo," %d: %12.5e ",edi->vecs.linfix.ieig[i],edi->vecs.linfix.xproj[i]);
-             fprintf(ed->edo,"\n");
-         }
-         if (edi->vecs.linacc.neig)
-         {
-             fprintf(ed->edo,"  Linacc  eigenvectors");
-             for (i=0; i<edi->vecs.linacc.neig; i++)
-                 fprintf(ed->edo," %d: %12.5e ",edi->vecs.linacc.ieig[i],edi->vecs.linacc.xproj[i]);
-             fprintf(ed->edo,"\n");
-         }
-         if (edi->vecs.radfix.neig)
-         {
-             fprintf(ed->edo,"  Radfix  eigenvectors");
-             for (i=0; i<edi->vecs.radfix.neig; i++)
-                 fprintf(ed->edo," %d: %12.5e ",edi->vecs.radfix.ieig[i],edi->vecs.radfix.xproj[i]);
-             fprintf(ed->edo,"\n");
-             fprintf(ed->edo,"  fixed increment radius = %f\n", calc_radius(&edi->vecs.radfix));
-         }
-         if (edi->vecs.radacc.neig)
-         {
-             fprintf(ed->edo,"  Radacc  eigenvectors");
-             for (i=0; i<edi->vecs.radacc.neig; i++)
-                 fprintf(ed->edo," %d: %12.5e ",edi->vecs.radacc.ieig[i],edi->vecs.radacc.xproj[i]);
-             fprintf(ed->edo,"\n");
-             fprintf(ed->edo,"  acceptance radius      = %f\n", calc_radius(&edi->vecs.radacc));
-         }
-         if (edi->vecs.radcon.neig)
-         {
-             fprintf(ed->edo,"  Radcon  eigenvectors");
-             for (i=0; i<edi->vecs.radcon.neig; i++)
-                 fprintf(ed->edo," %d: %12.5e ",edi->vecs.radcon.ieig[i],edi->vecs.radcon.xproj[i]);
-             fprintf(ed->edo,"\n");
-             fprintf(ed->edo,"  contracting radius     = %f\n", calc_radius(&edi->vecs.radcon));
-         }
++        fprintf(fp, EDcol_efmt, edi->vecs.mon.xproj[i]);
++    }
 +
-                 matrix      box)     /* the box                            */
++    for (i=0; i<edi->vecs.linfix.neig; i++)
++    {
++        fprintf(fp, EDcol_efmt, edi->vecs.linfix.xproj[i]);
++    }
++
++    for (i=0; i<edi->vecs.linacc.neig; i++)
++    {
++        fprintf(fp, EDcol_efmt, edi->vecs.linacc.xproj[i]);
++    }
++
++    for (i=0; i<edi->vecs.radfix.neig; i++)
++    {
++        fprintf(fp, EDcol_efmt, edi->vecs.radfix.xproj[i]);
++    }
++    if (edi->vecs.radfix.neig)
++    {
++        fprintf(fp, EDcol_ffmt, calc_radius(&edi->vecs.radfix)); /* fixed increment radius */
++    }
++
++    for (i=0; i<edi->vecs.radacc.neig; i++)
++    {
++        fprintf(fp, EDcol_efmt, edi->vecs.radacc.xproj[i]);
++    }
++    if (edi->vecs.radacc.neig)
++    {
++        fprintf(fp, EDcol_ffmt, calc_radius(&edi->vecs.radacc)); /* acceptance radius */
++    }
++
++    for (i=0; i<edi->vecs.radcon.neig; i++)
++    {
++        fprintf(fp, EDcol_efmt, edi->vecs.radcon.xproj[i]);
++    }
++    if (edi->vecs.radcon.neig)
++    {
++        fprintf(fp, EDcol_ffmt, calc_radius(&edi->vecs.radcon)); /* contracting radius */
 +    }
 +}
 +
 +/* Returns if any constraints are switched on */
 +static int ed_constraints(gmx_bool edtype, t_edpar *edi)
 +{
 +    if (edtype == eEDedsam || edtype == eEDflood)
 +    {
 +        return (edi->vecs.linfix.neig || edi->vecs.linacc.neig ||
 +                edi->vecs.radfix.neig || edi->vecs.radacc.neig ||
 +                edi->vecs.radcon.neig);
 +    }
 +    return 0;
 +}
 +
 +
 +/* Copies reference projection 'refproj' to fixed 'refproj0' variable for flooding/
 + * umbrella sampling simulations. */
 +static void copyEvecReference(t_eigvec* floodvecs)
 +{
 +    int i;
 +
 +
 +    if (NULL==floodvecs->refproj0)
++    {
 +        snew(floodvecs->refproj0, floodvecs->neig);
++    }
 +
 +    for (i=0; i<floodvecs->neig; i++)
 +    {
 +        floodvecs->refproj0[i] = floodvecs->refproj[i];
 +    }
 +}
 +
 +
++/* Call on MASTER only. Check whether the essential dynamics / flooding
++ * groups of the checkpoint file are consistent with the provided .edi file. */
++static void crosscheck_edi_file_vs_checkpoint(gmx_edsam_t ed, edsamstate_t *EDstate)
++{
++    t_edpar *edi = NULL;    /* points to a single edi data set */
++    int edinum;
++
++
++    if (NULL == EDstate->nref || NULL == EDstate->nav)
++    {
++        gmx_fatal(FARGS, "Essential dynamics and flooding can only be switched on (or off) at the\n"
++                         "start of a new simulation. If a simulation runs with/without ED constraints,\n"
++                         "it must also continue with/without ED constraints when checkpointing.\n"
++                         "To switch on (or off) ED constraints, please prepare a new .tpr to start\n"
++                         "from without a checkpoint.\n");
++    }
++
++    edi=ed->edpar;
++    edinum = 0;
++    while(edi != NULL)
++    {
++        /* Check number of atoms in the reference and average structures */
++        if (EDstate->nref[edinum] != edi->sref.nr)
++        {
++            gmx_fatal(FARGS, "The number of reference structure atoms in ED group %c is\n"
++                             "not the same in .cpt (NREF=%d) and .edi (NREF=%d) files!\n",
++                    get_EDgroupChar(edinum+1, 0), EDstate->nref[edinum], edi->sref.nr);
++        }
++        if (EDstate->nav[edinum] != edi->sav.nr)
++        {
++            gmx_fatal(FARGS, "The number of average structure atoms in ED group %c is\n"
++                             "not the same in .cpt (NREF=%d) and .edi (NREF=%d) files!\n",
++                    get_EDgroupChar(edinum+1, 0), EDstate->nav[edinum], edi->sav.nr);
++        }
++        edi=edi->next_edi;
++        edinum++;
++    }
++
++    if (edinum != EDstate->nED)
++    {
++        gmx_fatal(FARGS, "The number of essential dynamics / flooding groups is not consistent.\n"
++                         "There are %d ED groups in the .cpt file, but %d in the .edi file!\n"
++                         "Are you sure this is the correct .edi file?\n", EDstate->nED, edinum);
++    }
++}
++
++
++/* The edsamstate struct stores the information we need to make the ED group
++ * whole again after restarts from a checkpoint file. Here we do the following:
++ * a) If we did not start from .cpt, we prepare the struct for proper .cpt writing,
++ * b) if we did start from .cpt, we copy over the last whole structures from .cpt,
++ * c) in any case, for subsequent checkpoint writing, we set the pointers in
++ * edsamstate to the x_old arrays, which contain the correct PBC representation of
++ * all ED structures at the last time step. */
++static void init_edsamstate(gmx_edsam_t ed, edsamstate_t *EDstate)
++{
++    int     i, nr_edi;
++    t_edpar *edi;
++
++
++    snew(EDstate->old_sref_p, EDstate->nED);
++    snew(EDstate->old_sav_p , EDstate->nED);
++
++    /* If we did not read in a .cpt file, these arrays are not yet allocated */
++    if (!EDstate->bFromCpt)
++    {
++        snew(EDstate->nref, EDstate->nED);
++        snew(EDstate->nav , EDstate->nED);
++    }
++
++    /* Loop over all ED/flooding data sets (usually only one, though) */
++    edi = ed->edpar;
++    for (nr_edi = 1; nr_edi <= EDstate->nED; nr_edi++)
++    {
++        /* We always need the last reference and average positions such that
++         * in the next time step we can make the ED group whole again
++         * if the atoms do not have the correct PBC representation */
++        if (EDstate->bFromCpt)
++        {
++            /* Copy the last whole positions of reference and average group from .cpt */
++            for (i=0; i<edi->sref.nr; i++)
++            {
++                copy_rvec(EDstate->old_sref[nr_edi-1][i], edi->sref.x_old[i]);
++            }
++            for (i=0; i<edi->sav.nr ; i++)
++            {
++                copy_rvec(EDstate->old_sav [nr_edi-1][i], edi->sav.x_old [i]);
++            }
++        }
++        else
++        {
++            EDstate->nref[nr_edi-1] = edi->sref.nr;
++            EDstate->nav [nr_edi-1] = edi->sav.nr;
++        }
++
++        /* For subsequent checkpoint writing, set the edsamstate pointers to the edi arrays: */
++        EDstate->old_sref_p[nr_edi-1] = edi->sref.x_old;
++        EDstate->old_sav_p [nr_edi-1] = edi->sav.x_old ;
++
++        edi = edi->next_edi;
++    }
++}
++
++
++/* Adds 'buf' to 'str' */
++static void add_to_string(char **str, char *buf)
++{
++    int len;
++
++
++    len = strlen(*str) + strlen(buf) + 1;
++    srenew(*str, len);
++    strcat(*str, buf);
++}
++
++
++static void add_to_string_aligned(char **str, char *buf)
++{
++    char buf_aligned[STRLEN];
++
++    sprintf(buf_aligned, EDcol_sfmt, buf);
++    add_to_string(str, buf_aligned);
++}
++
++
++static void nice_legend(const char ***setname, int *nsets, char **LegendStr, char *value, char *unit, char EDgroupchar)
++{
++    char tmp[STRLEN], tmp2[STRLEN];
++
++
++    sprintf(tmp, "%c %s", EDgroupchar, value);
++    add_to_string_aligned(LegendStr, tmp);
++    sprintf(tmp2, "%s (%s)", tmp, unit);
++    (*setname)[*nsets] = strdup(tmp2);
++    (*nsets)++;
++}
++
++
++static void nice_legend_evec(const char ***setname, int *nsets, char **LegendStr, t_eigvec *evec, char EDgroupChar, const char *EDtype)
++{
++    int i;
++    char tmp[STRLEN];
++
++
++    for (i=0; i<evec->neig; i++)
++    {
++        sprintf(tmp, "EV%dprj%s", evec->ieig[i], EDtype);
++        nice_legend(setname, nsets, LegendStr, tmp, "nm", EDgroupChar);
++    }
++}
++
++
++/* Makes a legend for the xvg output file. Call on MASTER only! */
++static void write_edo_legend(gmx_edsam_t ed, int nED, const output_env_t oenv)
++{
++    t_edpar    *edi = NULL;
++    int        i;
++    int        nr_edi, nsets, n_flood, n_edsam;
++    const char **setname;
++    char       buf[STRLEN];
++    char       *LegendStr=NULL;
++
++
++    edi         = ed->edpar;
++
++    fprintf(ed->edo, "# Output will be written every %d step%s\n", ed->edpar->outfrq, ed->edpar->outfrq != 1 ? "s":"");
++
++    for (nr_edi = 1; nr_edi <= nED; nr_edi++)
++    {
++        fprintf(ed->edo, "#\n");
++        fprintf(ed->edo, "# Summary of applied con/restraints for the ED group %c\n", get_EDgroupChar(nr_edi, nED));
++        fprintf(ed->edo, "# Atoms in average structure: %d\n", edi->sav.nr);
++        fprintf(ed->edo, "#    monitor  : %d vec%s\n" , edi->vecs.mon.neig   , edi->vecs.mon.neig    != 1 ? "s":"");
++        fprintf(ed->edo, "#    LINFIX   : %d vec%s\n" , edi->vecs.linfix.neig, edi->vecs.linfix.neig != 1 ? "s":"");
++        fprintf(ed->edo, "#    LINACC   : %d vec%s\n" , edi->vecs.linacc.neig, edi->vecs.linacc.neig != 1 ? "s":"");
++        fprintf(ed->edo, "#    RADFIX   : %d vec%s\n" , edi->vecs.radfix.neig, edi->vecs.radfix.neig != 1 ? "s":"");
++        fprintf(ed->edo, "#    RADACC   : %d vec%s\n" , edi->vecs.radacc.neig, edi->vecs.radacc.neig != 1 ? "s":"");
++        fprintf(ed->edo, "#    RADCON   : %d vec%s\n" , edi->vecs.radcon.neig, edi->vecs.radcon.neig != 1 ? "s":"");
++        fprintf(ed->edo, "#    FLOODING : %d vec%s  " , edi->flood.vecs.neig , edi->flood.vecs.neig  != 1 ? "s":"");
++
++        if (edi->flood.vecs.neig)
++        {
++            /* If in any of the groups we find a flooding vector, flooding is turned on */
++            ed->eEDtype = eEDflood;
++
++            /* Print what flavor of flooding we will do */
++            if (0 == edi->flood.tau) /* constant flooding strength */
++            {
++                fprintf(ed->edo, "Efl_null = %g", edi->flood.constEfl);
++                if (edi->flood.bHarmonic)
++                {
++                    fprintf(ed->edo, ", harmonic");
++                }
++            }
++            else /* adaptive flooding */
++            {
++                fprintf(ed->edo, ", adaptive");
++            }
++        }
++        fprintf(ed->edo, "\n");
++
++        edi = edi->next_edi;
++    }
++
++    /* Print a nice legend */
++    snew(LegendStr, 1);
++    LegendStr[0] = '\0';
++    sprintf(buf, "#     %6s", "time");
++    add_to_string(&LegendStr, buf);
++
++    /* Calculate the maximum number of columns we could end up with */
++    edi     = ed->edpar;
++    nsets = 0;
++    for (nr_edi = 1; nr_edi <= nED; nr_edi++)
++    {
++        nsets += 5 +edi->vecs.mon.neig
++                   +edi->vecs.linfix.neig
++                   +edi->vecs.linacc.neig
++                   +edi->vecs.radfix.neig
++                   +edi->vecs.radacc.neig
++                   +edi->vecs.radcon.neig
++                + 6*edi->flood.vecs.neig;
++        edi = edi->next_edi;
++    }
++    snew(setname, nsets);
++
++    /* In the mdrun time step in a first function call (do_flood()) the flooding 
++     * forces are calculated and in a second function call (do_edsam()) the 
++     * ED constraints. To get a corresponding legend, we need to loop twice
++     * over the edi groups and output first the flooding, then the ED part */
++    
++    /* The flooding-related legend entries, if flooding is done */
++    nsets = 0;
++    if (eEDflood == ed->eEDtype)
++    {
++        edi   = ed->edpar;
++        for (nr_edi = 1; nr_edi <= nED; nr_edi++)
++        {
++            /* Always write out the projection on the flooding EVs. Of course, this can also
++             * be achieved with the monitoring option in do_edsam() (if switched on by the
++             * user), but in that case the positions need to be communicated in do_edsam(),
++             * which is not necessary when doing flooding only. */
++            nice_legend(&setname, &nsets, &LegendStr, "RMSD to ref", "nm", get_EDgroupChar(nr_edi, nED) );
++
++            for (i=0; i<edi->flood.vecs.neig; i++)
++            {
++                sprintf(buf, "EV%dprjFLOOD", edi->flood.vecs.ieig[i]);
++                nice_legend(&setname, &nsets, &LegendStr, buf, "nm", get_EDgroupChar(nr_edi, nED));
++
++                /* Output the current reference projection if it changes with time;
++                 * this can happen when flooding is used as harmonic restraint */
++                if (edi->flood.bHarmonic && edi->flood.vecs.refprojslope[i] != 0.0)
++                {
++                    sprintf(buf, "EV%d ref.prj.", edi->flood.vecs.ieig[i]);
++                    nice_legend(&setname, &nsets, &LegendStr, buf, "nm", get_EDgroupChar(nr_edi, nED));
++                }
++
++                /* For flooding we also output Efl, Vfl, deltaF, and the flooding forces */
++                if (0 != edi->flood.tau) /* only output Efl for adaptive flooding (constant otherwise) */
++                {
++                    sprintf(buf, "EV%d-Efl", edi->flood.vecs.ieig[i]);
++                    nice_legend(&setname, &nsets, &LegendStr, buf, "kJ/mol", get_EDgroupChar(nr_edi, nED));
++                }
++
++                sprintf(buf, "EV%d-Vfl", edi->flood.vecs.ieig[i]);
++                nice_legend(&setname, &nsets, &LegendStr, buf, "kJ/mol", get_EDgroupChar(nr_edi, nED));
++
++                if (0 != edi->flood.tau) /* only output deltaF for adaptive flooding (zero otherwise) */
++                {
++                    sprintf(buf, "EV%d-deltaF", edi->flood.vecs.ieig[i]);
++                    nice_legend(&setname, &nsets, &LegendStr, buf, "kJ/mol", get_EDgroupChar(nr_edi, nED));
++                }
++
++                sprintf(buf, "EV%d-FLforces", edi->flood.vecs.ieig[i]);
++                nice_legend(&setname, &nsets, &LegendStr, buf, "kJ/mol/nm", get_EDgroupChar(nr_edi, nED));
++            }
++
++            edi = edi->next_edi;
++        } /* End of flooding-related legend entries */
++    }
++    n_flood = nsets;
++
++    /* Now the ED-related entries, if essential dynamics is done */
++    edi         = ed->edpar;
++    for (nr_edi = 1; nr_edi <= nED; nr_edi++)
++    {
++        nice_legend(&setname, &nsets, &LegendStr, "RMSD to ref", "nm", get_EDgroupChar(nr_edi, nED) );
++
++        /* Essential dynamics, projections on eigenvectors */
++        nice_legend_evec(&setname, &nsets, &LegendStr, &edi->vecs.mon   , get_EDgroupChar(nr_edi, nED), "MON"   );
++        nice_legend_evec(&setname, &nsets, &LegendStr, &edi->vecs.linfix, get_EDgroupChar(nr_edi, nED), "LINFIX");
++        nice_legend_evec(&setname, &nsets, &LegendStr, &edi->vecs.linacc, get_EDgroupChar(nr_edi, nED), "LINACC");
++        nice_legend_evec(&setname, &nsets, &LegendStr, &edi->vecs.radfix, get_EDgroupChar(nr_edi, nED), "RADFIX");
++        if (edi->vecs.radfix.neig)
++        {
++            nice_legend(&setname, &nsets, &LegendStr, "RADFIX radius", "nm", get_EDgroupChar(nr_edi, nED));
++        }
++        nice_legend_evec(&setname, &nsets, &LegendStr, &edi->vecs.radacc, get_EDgroupChar(nr_edi, nED), "RADACC");
++        if (edi->vecs.radacc.neig)
++        {
++            nice_legend(&setname, &nsets, &LegendStr, "RADACC radius", "nm", get_EDgroupChar(nr_edi, nED));
++        }
++        nice_legend_evec(&setname, &nsets, &LegendStr, &edi->vecs.radcon, get_EDgroupChar(nr_edi, nED), "RADCON");
++        if (edi->vecs.radcon.neig)
++        {
++            nice_legend(&setname, &nsets, &LegendStr, "RADCON radius", "nm", get_EDgroupChar(nr_edi, nED));
++        }
++
++        edi = edi->next_edi;
++    } /* end of 'pure' essential dynamics legend entries */
++    n_edsam = nsets - n_flood;
++
++    xvgr_legend(ed->edo, nsets, setname, oenv);
++    sfree(setname);
++
++    fprintf(ed->edo, "#\n"
++                     "# Legend for %d column%s of flooding plus %d column%s of essential dynamics data:\n",
++                     n_flood, 1 == n_flood ? "":"s", 
++                     n_edsam, 1 == n_edsam ? "":"s");
++    fprintf(ed->edo, "%s", LegendStr);
++    sfree(LegendStr);
++    
++    fflush(ed->edo);
++}
++
++
 +void init_edsam(gmx_mtop_t  *mtop,   /* global topology                    */
 +                t_inputrec  *ir,     /* input record                       */
 +                t_commrec   *cr,     /* communication record               */
 +                gmx_edsam_t ed,      /* contains all ED data               */
 +                rvec        x[],     /* positions of the whole MD system   */
-     int     numedis=0;      /* keep track of the number of ED data sets in edi file */
++                matrix      box,     /* the box                            */
++                edsamstate_t *EDstate)
 +{
 +    t_edpar *edi = NULL;    /* points to a single edi data set */
-     rvec    *xfit   = NULL; /* the positions which will be fitted to the reference structure  */
-     rvec    *xstart = NULL; /* the positions which are subject to ED sampling */
 +    int     i,nr_edi,avindex;
 +    rvec    *x_pbc  = NULL; /* positions of the whole MD system with pbc removed  */
-     ed->bFirst = 1;
++    rvec    *xfit=NULL, *xstart=NULL; /* dummy arrays to determine initial RMSDs  */
 +    rvec    fit_transvec;   /* translation ... */
 +    matrix  fit_rotmat;     /* ... and rotation from fit to reference structure */
 +
 +
 +    if (!DOMAINDECOMP(cr) && PAR(cr) && MASTER(cr))
++    {
 +        gmx_fatal(FARGS, "Please switch on domain decomposition to use essential dynamics in parallel.");
++    }
 +
 +    if (MASTER(cr))
++    {
 +        fprintf(stderr, "ED: Initializing essential dynamics constraints.\n");
 +
++        if (NULL == ed)
++        {
++            gmx_fatal(FARGS, "The checkpoint file you provided is from an essential dynamics or\n"
++                             "flooding simulation. Please also provide the correct .edi file with -ei.\n");
++        }
++    }
++
 +    /* Needed for initializing radacc radius in do_edsam */
-         snew(ed->edpar,1);
-         /* Read the whole edi file at once: */
-         read_edi_file(ed,ed->edpar,mtop->natoms,cr);
-         /* Initialization for every ED/flooding dataset. Flooding uses one edi dataset per
++    ed->bFirst = TRUE;
 +
 +    /* The input file is read by the master and the edi structures are
 +     * initialized here. Input is stored in ed->edpar. Then the edi
 +     * structures are transferred to the other nodes */
 +    if (MASTER(cr))
 +    {
-             init_edi(mtop,ir,cr,ed,edi);
-             /* Init flooding parameters if needed */
-             init_flood(edi,ed,ir->delta_t,cr);
++        /* Initialization for every ED/flooding group. Flooding uses one edi group per
 +         * flooding vector, Essential dynamics can be applied to more than one structure
 +         * as well, but will be done in the order given in the edi file, so
 +         * expect different results for different order of edi file concatenation! */
 +        edi=ed->edpar;
 +        while(edi != NULL)
 +        {
-             numedis++;
++            init_edi(mtop,edi);
++            init_flood(edi,ed,ir->delta_t);
 +            edi=edi->next_edi;
 +        }
 +    }
 +
 +    /* The master does the work here. The other nodes get the positions
 +     * not before dd_partition_system which is called after init_edsam */
 +    if (MASTER(cr))
 +    {
 +        /* Remove pbc, make molecule whole.
 +         * When ir->bContinuation=TRUE this has already been done, but ok.
 +         */
 +        snew(x_pbc,mtop->natoms);
 +        m_rveccopy(mtop->natoms,x,x_pbc);
 +        do_pbc_first_mtop(NULL,ir->ePBC,box,mtop,x_pbc);
 +
 +        /* Reset pointer to first ED data set which contains the actual ED data */
 +        edi=ed->edpar;
-         for (nr_edi = 1; nr_edi <= numedis; nr_edi++)
 +        /* Loop over all ED/flooding data sets (usually only one, though) */
-             /* We use srenew to allocate memory since the size of the buffers
-              * is likely to change with every ED dataset */
-             srenew(xfit  , edi->sref.nr );
-             srenew(xstart, edi->sav.nr  );
-             /* Extract the positions of the atoms to which will be fitted */
-             for (i=0; i < edi->sref.nr; i++)
++        for (nr_edi = 1; nr_edi <= EDstate->nED; nr_edi++)
 +        {
-                 copy_rvec(x_pbc[edi->sref.anrs[i]], xfit[i]);
-                 /* Save the sref positions such that in the next time step we can make the ED group whole
-                  * in case any of the atoms do not have the correct PBC representation */
-                 copy_rvec(xfit[i], edi->sref.x_old[i]);
++            /* For multiple ED groups we use the output frequency that was specified
++             * in the first set */
++            if (nr_edi > 1)
 +            {
-             /* Extract the positions of the atoms subject to ED sampling */
-             for (i=0; i < edi->sav.nr; i++)
++                edi->outfrq = ed->edpar->outfrq;
 +            }
 +
-                 copy_rvec(x_pbc[edi->sav.anrs[i]], xstart[i]);
++            /* Extract the initial reference and average positions. When starting
++             * from .cpt, these have already been read into sref.x_old
++             * in init_edsamstate() */
++            if (!EDstate->bFromCpt)
 +            {
-                 /* Save the sav positions such that in the next time step we can make the ED group whole
-                  * in case any of the atoms do not have the correct PBC representation */
-                 copy_rvec(xstart[i], edi->sav.x_old[i]);
++                /* If this is the first run (i.e. no checkpoint present) we assume
++                 * that the starting positions give us the correct PBC representation */
++                for (i=0; i < edi->sref.nr; i++)
++                {
++                    copy_rvec(x_pbc[edi->sref.anrs[i]], edi->sref.x_old[i]);
++                }
 +
-             fprintf(stderr, "ED: Initial RMSD from reference after fit = %f nm (dataset #%d)\n",
-                     rmsd_from_structure(xfit, &edi->sref), nr_edi);
++                for (i=0; i < edi->sav.nr; i++)
++                {
++                    copy_rvec(x_pbc[edi->sav.anrs[i]], edi->sav.x_old[i]);
++                }
 +            }
 +
++            /* Now we have the PBC-correct start positions of the reference and
++               average structure. We copy that over to dummy arrays on which we
++               can apply fitting to print out the RMSD. We srenew the memory since
++               the size of the buffers is likely different for every ED group */
++            srenew(xfit  , edi->sref.nr );
++            srenew(xstart, edi->sav.nr  );
++            copy_rvecn(edi->sref.x_old, xfit, 0, edi->sref.nr);
++            copy_rvecn(edi->sav.x_old, xstart, 0, edi->sav.nr);
++
 +            /* Make the fit to the REFERENCE structure, get translation and rotation */
 +            fit_to_reference(xfit, fit_transvec, fit_rotmat, edi);
 +
 +            /* Output how well we fit to the reference at the start */
 +            translate_and_rotate(xfit, edi->sref.nr, fit_transvec, fit_rotmat);
-                 rad_project(edi, &edi->star.x[avindex], &edi->vecs.radcon, cr);
-             } else
-                 rad_project(edi, xstart, &edi->vecs.radcon, cr);
++            fprintf(stderr, "ED: Initial RMSD from reference after fit = %f nm",
++                    rmsd_from_structure(xfit, &edi->sref));
++            if (EDstate->nED > 1)
++            {
++                fprintf(stderr, " (ED group %c)", get_EDgroupChar(nr_edi, EDstate->nED));
++            }
++            fprintf(stderr, "\n");
 +
 +            /* Now apply the translation and rotation to the atoms on which ED sampling will be performed */
 +            translate_and_rotate(xstart, edi->sav.nr, fit_transvec, fit_rotmat);
 +
 +            /* calculate initial projections */
 +            project(xstart, edi);
 +
 +            /* For the target and origin structure both a reference (fit) and an
 +             * average structure can be provided in make_edi. If both structures
 +             * are the same, make_edi only stores one of them in the .edi file.
 +             * If they differ, first the fit and then the average structure is stored
 +             * in star (or sor), thus the number of entries in star/sor is
 +             * (n_fit + n_av) with n_fit the size of the fitting group and n_av
 +             * the size of the average group. */
 +
 +            /* process target structure, if required */
 +            if (edi->star.nr > 0)
 +            {
 +                fprintf(stderr, "ED: Fitting target structure to reference structure\n");
 +
 +                /* get translation & rotation for fit of target structure to reference structure */
 +                fit_to_reference(edi->star.x, fit_transvec, fit_rotmat, edi);
 +                /* do the fit */
 +                translate_and_rotate(edi->star.x, edi->star.nr, fit_transvec, fit_rotmat);
 +                if (edi->star.nr == edi->sav.nr)
 +                {
 +                    avindex = 0;
 +                }
 +                else /* edi->star.nr = edi->sref.nr + edi->sav.nr */
 +                {
 +                    /* The last sav.nr indices of the target structure correspond to
 +                     * the average structure, which must be projected */
 +                    avindex = edi->star.nr - edi->sav.nr;
 +                }
-                 rad_project(edi, &edi->sori.x[avindex], &edi->vecs.radacc, cr);
-                 rad_project(edi, &edi->sori.x[avindex], &edi->vecs.radfix, cr);
++                rad_project(edi, &edi->star.x[avindex], &edi->vecs.radcon);
++            }
++            else
++            {
++                rad_project(edi, xstart, &edi->vecs.radcon);
++            }
 +
 +            /* process structure that will serve as origin of expansion circle */
 +            if ( (eEDflood == ed->eEDtype) && (FALSE == edi->flood.bConstForce) )
++            {
 +                fprintf(stderr, "ED: Setting center of flooding potential (0 = average structure)\n");
++            }
 +
 +            if (edi->sori.nr > 0)
 +            {
 +                fprintf(stderr, "ED: Fitting origin structure to reference structure\n");
 +
 +                /* fit this structure to reference structure */
 +                fit_to_reference(edi->sori.x, fit_transvec, fit_rotmat, edi);
 +                /* do the fit */
 +                translate_and_rotate(edi->sori.x, edi->sori.nr, fit_transvec, fit_rotmat);
 +                if (edi->sori.nr == edi->sav.nr)
 +                {
 +                    avindex = 0;
 +                }
 +                else /* edi->sori.nr = edi->sref.nr + edi->sav.nr */
 +                {
 +                    /* For the projection, we need the last sav.nr indices of sori */
 +                    avindex = edi->sori.nr - edi->sav.nr;
 +                }
 +
-                     rad_project(edi, &edi->sori.x[avindex], &edi->flood.vecs, cr);
++                rad_project(edi, &edi->sori.x[avindex], &edi->vecs.radacc);
++                rad_project(edi, &edi->sori.x[avindex], &edi->vecs.radfix);
 +                if ( (eEDflood == ed->eEDtype) && (FALSE == edi->flood.bConstForce) )
 +                {
 +                    fprintf(stderr, "ED: The ORIGIN structure will define the flooding potential center.\n");
 +                    /* Set center of flooding potential to the ORIGIN structure */
-                 rad_project(edi, xstart, &edi->vecs.radacc, cr);
-                 rad_project(edi, xstart, &edi->vecs.radfix, cr);
++                    rad_project(edi, &edi->sori.x[avindex], &edi->flood.vecs);
 +                    /* We already know that no (moving) reference position was provided,
 +                     * therefore we can overwrite refproj[0]*/
 +                    copyEvecReference(&edi->flood.vecs);
 +                }
 +            }
 +            else /* No origin structure given */
 +            {
-                     fprintf(stdout, "ED: EV %d flooding potential center: %11.4e", i, edi->flood.vecs.refproj[i]);
++                rad_project(edi, xstart, &edi->vecs.radacc);
++                rad_project(edi, xstart, &edi->vecs.radfix);
 +                if ( (eEDflood == ed->eEDtype) && (FALSE == edi->flood.bConstForce) )
 +                {
 +                    if (edi->flood.bHarmonic)
 +                    {
 +                        fprintf(stderr, "ED: A (possibly changing) ref. projection will define the flooding potential center.\n");
 +                        for (i=0; i<edi->flood.vecs.neig; i++)
++                        {
 +                            edi->flood.vecs.refproj[i] = edi->flood.vecs.refproj0[i];
++                        }
 +                    }
 +                    else
 +                    {
 +                        fprintf(stderr, "ED: The AVERAGE structure will define the flooding potential center.\n");
 +                        /* Set center of flooding potential to the center of the covariance matrix,
 +                         * i.e. the average structure, i.e. zero in the projected system */
 +                        for (i=0; i<edi->flood.vecs.neig; i++)
++                        {
 +                            edi->flood.vecs.refproj[i] = 0.0;
++                        }
 +                    }
 +                }
 +            }
 +            /* For convenience, output the center of the flooding potential for the eigenvectors */
 +            if ( (eEDflood == ed->eEDtype) && (FALSE == edi->flood.bConstForce) )
 +            {
 +                for (i=0; i<edi->flood.vecs.neig; i++)
 +                {
-             rad_project(edi, xstart, &edi->vecs.linacc, cr);
-             rad_project(edi, xstart, &edi->vecs.linfix, cr);
-             /* Output to file, set the step to -1 so that write_edo knows it was called from init_edsam */
-             if (ed->edo && !(ed->bStartFromCpt))
-                 write_edo(nr_edi, edi, ed, -1, 0);
++                    fprintf(stdout, "ED: EV %d flooding potential center: %11.4e", edi->flood.vecs.ieig[i], edi->flood.vecs.refproj[i]);
 +                    if (edi->flood.bHarmonic)
++                    {
 +                        fprintf(stdout, " (adding %11.4e/timestep)", edi->flood.vecs.refprojslope[i]);
++                    }
 +                    fprintf(stdout, "\n");
 +                }
 +            }
 +
 +            /* set starting projections for linsam */
-         gmx_bcast(sizeof(numedis), &numedis, cr);
++            rad_project(edi, xstart, &edi->vecs.linacc);
++            rad_project(edi, xstart, &edi->vecs.linfix);
 +
 +            /* Prepare for the next edi data set: */
 +            edi=edi->next_edi;
 +        }
 +        /* Cleaning up on the master node: */
 +        sfree(x_pbc);
 +        sfree(xfit);
 +        sfree(xstart);
 +
 +    } /* end of MASTER only section */
 +
 +    if (PAR(cr))
 +    {
 +        /* First let everybody know how many ED data sets to expect */
-         broadcast_ed_data(cr, ed, numedis);
++        gmx_bcast(sizeof(EDstate->nED), &EDstate->nED, cr);
 +        /* Broadcast the essential dynamics / flooding data to all nodes */
-         for (nr_edi = 1; nr_edi <= numedis; nr_edi++)
++        broadcast_ed_data(cr, ed, EDstate->nED);
 +    }
 +    else
 +    {
 +        /* In the single-CPU case, point the local atom numbers pointers to the global
 +         * one, so that we can use the same notation in serial and parallel case: */
 +
 +        /* Loop over all ED data sets (usually only one, though) */
 +        edi=ed->edpar;
-             /* An on we go to the next edi dataset */
++        for (nr_edi = 1; nr_edi <= EDstate->nED; nr_edi++)
 +        {
 +            edi->sref.anrs_loc = edi->sref.anrs;
 +            edi->sav.anrs_loc  = edi->sav.anrs;
 +            edi->star.anrs_loc = edi->star.anrs;
 +            edi->sori.anrs_loc = edi->sori.anrs;
 +            /* For the same reason as above, make a dummy c_ind array: */
 +            snew(edi->sav.c_ind, edi->sav.nr);
 +            /* Initialize the array */
 +            for (i=0; i<edi->sav.nr; i++)
++            {
 +                edi->sav.c_ind[i] = i;
++            }
 +            /* In the general case we will need a different-sized array for the reference indices: */
 +            if (!edi->bRefEqAv)
 +            {
 +                snew(edi->sref.c_ind, edi->sref.nr);
 +                for (i=0; i<edi->sref.nr; i++)
++                {
 +                    edi->sref.c_ind[i] = i;
++                }
 +            }
 +            /* Point to the very same array in case of other structures: */
 +            edi->star.c_ind = edi->sav.c_ind;
 +            edi->sori.c_ind = edi->sav.c_ind;
 +            /* In the serial case, the local number of atoms is the global one: */
 +            edi->sref.nr_loc = edi->sref.nr;
 +            edi->sav.nr_loc  = edi->sav.nr;
 +            edi->star.nr_loc = edi->star.nr;
 +            edi->sori.nr_loc = edi->sori.nr;
 +
-     for (nr_edi = 1; nr_edi <= numedis; nr_edi++)
++            /* An on we go to the next ED group */
 +            edi=edi->next_edi;
 +        }
 +    }
 +
 +    /* Allocate space for ED buffer variables */
 +    /* Again, loop over ED data sets */
 +    edi=ed->edpar;
-         /* An on we go to the next edi dataset */
++    for (nr_edi = 1; nr_edi <= EDstate->nED; nr_edi++)
 +    {
 +        /* Allocate space for ED buffer */
 +        snew(edi->buf, 1);
 +        snew(edi->buf->do_edsam, 1);
 +
 +        /* Space for collective ED buffer variables */
 +
 +        /* Collective positions of atoms with the average indices */
 +        snew(edi->buf->do_edsam->xcoll                  , edi->sav.nr);
 +        snew(edi->buf->do_edsam->shifts_xcoll           , edi->sav.nr); /* buffer for xcoll shifts */
 +        snew(edi->buf->do_edsam->extra_shifts_xcoll     , edi->sav.nr);
 +        /* Collective positions of atoms with the reference indices */
 +        if (!edi->bRefEqAv)
 +        {
 +            snew(edi->buf->do_edsam->xc_ref             , edi->sref.nr);
 +            snew(edi->buf->do_edsam->shifts_xc_ref      , edi->sref.nr); /* To store the shifts in */
 +            snew(edi->buf->do_edsam->extra_shifts_xc_ref, edi->sref.nr);
 +        }
 +
 +        /* Get memory for flooding forces */
 +        snew(edi->flood.forces_cartesian                , edi->sav.nr);
 +
 +#ifdef DUMPEDI
 +        /* Dump it all into one file per process */
 +        dump_edi(edi, cr, nr_edi);
 +#endif
 +
-               t_mdatoms   *md,
++        /* Next ED group */
 +        edi=edi->next_edi;
 +    }
 +
 +    /* Flush the edo file so that the user can check some things
 +     * when the simulation has started */
 +    if (ed->edo)
++    {
 +        fflush(ed->edo);
++    }
 +}
 +
 +
 +void do_edsam(t_inputrec  *ir,
 +              gmx_large_int_t step,
-     gmx_bool bSuppress=FALSE; /* Write .edo file on master? */
 +              t_commrec   *cr,
 +              rvec        xs[],   /* The local current positions on this processor */
 +              rvec        v[],    /* The velocities */
 +              matrix      box,
 +              gmx_edsam_t ed)
 +{
 +    int     i,edinr,iupdate=500;
 +    matrix  rotmat;         /* rotation matrix */
 +    rvec    transvec;       /* translation vector */
 +    rvec    dv,dx,x_unsh;   /* tmp vectors for velocity, distance, unshifted x coordinate */
 +    real    dt_1;           /* 1/dt */
 +    struct t_do_edsam *buf;
 +    t_edpar *edi;
 +    real    rmsdev=-1;      /* RMSD from reference structure prior to applying the constraints */
-     /* Loop over all ED datasets (usually one) */
++    gmx_bool bSuppress=FALSE; /* Write .xvg output file on master? */
 +
 +
 +    /* Check if ED sampling has to be performed */
 +    if ( ed->eEDtype==eEDnone )
++    {
 +        return;
++    }
 +
 +    /* Suppress output on first call of do_edsam if
 +     * two-step sd2 integrator is used */
 +    if ( (ir->eI==eiSD2) && (v != NULL) )
++    {
 +        bSuppress = TRUE;
++    }
 +
 +    dt_1 = 1.0/ir->delta_t;
 +
- #ifdef DEBUG_ED
-             dump_xcoll(edi, buf, cr, step);
- #endif
++    /* Loop over all ED groups (usually one) */
 +    edi  = ed->edpar;
 +    edinr = 0;
 +    while (edi != NULL)
 +    {
 +        edinr++;
 +        if (edi->bNeedDoEdsam)
 +        {
 +
 +            buf=edi->buf->do_edsam;
 +
 +            if (ed->bFirst)
++            {
 +                /* initialise radacc radius for slope criterion */
 +                buf->oldrad=calc_radius(&edi->vecs.radacc);
++            }
 +
 +            /* Copy the positions into buf->xc* arrays and after ED
 +             * feed back corrections to the official positions */
 +
 +            /* Broadcast the ED positions such that every node has all of them
 +             * Every node contributes its local positions xs and stores it in
 +             * the collective buf->xcoll array. Note that for edinr > 1
 +             * xs could already have been modified by an earlier ED */
 +
 +            communicate_group_positions(cr, buf->xcoll, buf->shifts_xcoll, buf->extra_shifts_xcoll, PAR(cr) ? buf->bUpdateShifts : TRUE, xs,
 +                    edi->sav.nr, edi->sav.nr_loc, edi->sav.anrs_loc, edi->sav.c_ind, edi->sav.x_old,  box);
 +
-                 rad_project(edi, buf->xcoll, &edi->vecs.radacc, cr);
-                 rad_project(edi, buf->xcoll, &edi->vecs.radfix, cr);
 +            /* Only assembly reference positions if their indices differ from the average ones */
 +            if (!edi->bRefEqAv)
++            {
 +                communicate_group_positions(cr, buf->xc_ref, buf->shifts_xc_ref, buf->extra_shifts_xc_ref, PAR(cr) ? buf->bUpdateShifts : TRUE, xs,
 +                        edi->sref.nr, edi->sref.nr_loc, edi->sref.anrs_loc, edi->sref.c_ind, edi->sref.x_old, box);
++            }
 +
 +            /* If bUpdateShifts was TRUE then the shifts have just been updated in communicate_group_positions.
 +             * We do not need to update the shifts until the next NS step. Note that dd_make_local_ed_indices
 +             * set bUpdateShifts=TRUE in the parallel case. */
 +            buf->bUpdateShifts = FALSE;
 +
 +            /* Now all nodes have all of the ED positions in edi->sav->xcoll,
 +             * as well as the indices in edi->sav.anrs */
 +
 +            /* Fit the reference indices to the reference structure */
 +            if (edi->bRefEqAv)
++            {
 +                fit_to_reference(buf->xcoll , transvec, rotmat, edi);
++            }
 +            else
++            {
 +                fit_to_reference(buf->xc_ref, transvec, rotmat, edi);
++            }
 +
 +            /* Now apply the translation and rotation to the ED structure */
 +            translate_and_rotate(buf->xcoll, edi->sav.nr, transvec, rotmat);
 +
 +            /* Find out how well we fit to the reference (just for output steps) */
 +            if (do_per_step(step,edi->outfrq) && MASTER(cr))
 +            {
 +                if (edi->bRefEqAv)
 +                {
 +                    /* Indices of reference and average structures are identical,
 +                     * thus we can calculate the rmsd to SREF using xcoll */
 +                    rmsdev = rmsd_from_structure(buf->xcoll,&edi->sref);
 +                }
 +                else
 +                {
 +                    /* We have to translate & rotate the reference atoms first */
 +                    translate_and_rotate(buf->xc_ref, edi->sref.nr, transvec, rotmat);
 +                    rmsdev = rmsd_from_structure(buf->xc_ref,&edi->sref);
 +                }
 +            }
 +
 +            /* update radsam references, when required */
 +            if (do_per_step(step,edi->maxedsteps) && step >= edi->presteps)
 +            {
 +                project(buf->xcoll, edi);
-                     rad_project(edi, buf->xcoll, &edi->vecs.radacc, cr);
++                rad_project(edi, buf->xcoll, &edi->vecs.radacc);
++                rad_project(edi, buf->xcoll, &edi->vecs.radfix);
 +                buf->oldrad=-1.e5;
 +            }
 +
 +            /* update radacc references, when required */
 +            if (do_per_step(step,iupdate) && step >= edi->presteps)
 +            {
 +                edi->vecs.radacc.radius = calc_radius(&edi->vecs.radacc);
 +                if (edi->vecs.radacc.radius - buf->oldrad < edi->slope)
 +                {
 +                    project(buf->xcoll, edi);
-                 } else
++                    rad_project(edi, buf->xcoll, &edi->vecs.radacc);
 +                    buf->oldrad = 0.0;
-                 ed_apply_constraints(buf->xcoll, edi, step+1 - ir->init_step, cr);
++                }
++                else
++                {
 +                    buf->oldrad = edi->vecs.radacc.radius;
++                }
 +            }
 +
 +            /* apply the constraints */
 +            if (step >= edi->presteps && ed_constraints(ed->eEDtype, edi))
 +            {
 +                /* ED constraints should be applied already in the first MD step
 +                 * (which is step 0), therefore we pass step+1 to the routine */
-                     write_edo(edinr, edi, ed, step, rmsdev);
++                ed_apply_constraints(buf->xcoll, edi, step+1 - ir->init_step);
 +            }
 +
 +            /* write to edo, when required */
 +            if (do_per_step(step,edi->outfrq))
 +            {
 +                project(buf->xcoll, edi);
 +                if (MASTER(cr) && !bSuppress)
-         /* Prepare for the next ED dataset */
++                {
++                    write_edo(edi, ed->edo, rmsdev);
++                }
 +            }
 +
 +            /* Copy back the positions unless monitoring only */
 +            if (ed_constraints(ed->eEDtype, edi))
 +            {
 +                /* remove fitting */
 +                rmfit(edi->sav.nr, buf->xcoll, transvec, rotmat);
 +
 +                /* Copy the ED corrected positions into the coordinate array */
 +                /* Each node copies its local part. In the serial case, nat_loc is the
 +                 * total number of ED atoms */
 +                for (i=0; i<edi->sav.nr_loc; i++)
 +                {
 +                    /* Unshift local ED coordinate and store in x_unsh */
 +                    ed_unshift_single_coord(box, buf->xcoll[edi->sav.c_ind[i]],
 +                                            buf->shifts_xcoll[edi->sav.c_ind[i]], x_unsh);
 +
 +                    /* dx is the ED correction to the positions: */
 +                    rvec_sub(x_unsh, xs[edi->sav.anrs_loc[i]], dx);
 +
 +                    if (v != NULL)
 +                    {
 +                        /* dv is the ED correction to the velocity: */
 +                        svmul(dt_1, dx, dv);
 +                        /* apply the velocity correction: */
 +                        rvec_inc(v[edi->sav.anrs_loc[i]], dv);
 +                    }
 +                    /* Finally apply the position correction due to ED: */
 +                    copy_rvec(x_unsh, xs[edi->sav.anrs_loc[i]]);
 +                }
 +            }
 +        } /* END of if (edi->bNeedDoEdsam) */
 +
-     } /* END of loop over ED datasets */
++        /* Prepare for the next ED group */
 +        edi = edi->next_edi;
 +
++    } /* END of loop over ED groups */
 +
 +    ed->bFirst = FALSE;
 +}
index 38bf30cf32a8d5ca229bc17fa5f0ba38e6a50e54,0000000000000000000000000000000000000000..05ccd0eda5f8f5810efeef0980c6aca1d085fc21
mode 100644,000000..100644
--- /dev/null
@@@ -1,134 -1,0 +1,134 @@@
- #ifndef GMX_LIB_MPI
 +/* -*- mode: c; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4; c-file-style: "stroustrup"; -*-
 + * 
 + *                This source code is part of
 + * 
 + *                 G   R   O   M   A   C   S
 + * 
 + *          GROningen MAchine for Chemical Simulations
 + * 
 + * Written by David van der Spoel, Erik Lindahl, Berk Hess, and others.
 + * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 + * Copyright (c) 2001-2012, The GROMACS development team,
 + * check out http://www.gromacs.org for more information.
 + 
 + * This program is free software; you can redistribute it and/or
 + * modify it under the terms of the GNU General Public License
 + * as published by the Free Software Foundation; either version 2
 + * of the License, or (at your option) any later version.
 + * 
 + * If you want to redistribute modifications, please consider that
 + * scientific software is very special. Version control is crucial -
 + * bugs must be traceable. We will be happy to consider code for
 + * inclusion in the official distribution, but derived work must not
 + * be called official GROMACS. Details are found in the README & COPYING
 + * files - if they are missing, get the official version at www.gromacs.org.
 + * 
 + * To help us fund GROMACS development, we humbly ask that you cite
 + * the papers on the package - you can find them in the top README file.
 + * 
 + * For more info, check our website at http://www.gromacs.org
 + * 
 + * And Hey:
 + * Groningen Machine for Chemical Simulation
 + */
 +
 +#ifndef FFT5D_H_     
 +#define FFT5D_H_
 +
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#ifdef __cplusplus
 +extern "C" {
 +#endif
 +
 +#ifdef NOGMX
 +/*#define GMX_MPI*/
 +/*#define GMX_FFT_FFTW3*/
 +FILE* debug;
 +#endif
 +
 +#include <types/commrec.h>
 +#include "gmxcomplex.h"
 +#include "gmx_fft.h"
 +
++#ifndef GMX_MPI
 +double MPI_Wtime();
 +#endif
 +
 +/*currently only special optimization for FFTE*/
 +#ifdef GMX_FFT_FFTW3
 +#include <fftw3.h>
 +#endif
 +
 +#ifndef GMX_DOUBLE
 +#define FFTW(x) fftwf_##x
 +#else
 +#define FFTW(x) fftw_##x
 +#endif
 +
 +#ifdef NOGMX
 +struct fft5d_time_t {
 +    double fft,local,mpi1,mpi2;
 +};
 +typedef struct fft5d_time_t *fft5d_time;
 +#else
 +#include "gmx_wallcycle.h"
 +typedef gmx_wallcycle_t fft5d_time;
 +#endif
 +
 +typedef enum fft5d_flags_t {
 +    FFT5D_ORDER_YZ=1,
 +    FFT5D_BACKWARD=2,
 +    FFT5D_REALCOMPLEX=4,
 +    FFT5D_DEBUG=8,
 +    FFT5D_NOMEASURE=16,
 +    FFT5D_INPLACE=32,
 +    FFT5D_NOMALLOC=64
 +} fft5d_flags;
 +
 +struct fft5d_plan_t {
 +    t_complex *lin;
 +    t_complex *lout,*lout2,*lout3;
 +    gmx_fft_t* p1d[3];   /*1D plans*/
 +#ifdef GMX_FFT_FFTW3 
 +    FFTW(plan) p2d;  /*2D plan: used for 1D decomposition if FFT supports transposed output*/
 +    FFTW(plan) p3d;  /*3D plan: used for 0D decomposition if FFT supports transposed output*/
 +    FFTW(plan) mpip[2];
 +#endif
 +    MPI_Comm cart[2];
 +
 +    int N[3],M[3],K[3]; /*local length in transposed coordinate system (if not divisisable max)*/
 +    int pN[3],pM[3], pK[3]; /*local length - not max but length for this processor*/
 +    int oM[3],oK[3]; /*offset for current processor*/
 +    int *iNin[3],*oNin[3],*iNout[3],*oNout[3]; /*size for each processor (if divisisable=max) for out(=split) 
 +                                                 and in (=join) and offsets in transposed coordinate system*/
 +    int C[3],rC[3]; /*global length (of the one global axes) */
 +    /* C!=rC for real<->complex. then C=rC/2 but with potential padding*/
 +    int P[2]; /*size of processor grid*/
 +/*  int fftorder;*/
 +/*  int direction;*/
 +/*  int realcomplex;*/
 +    int flags;
 +    /*int N0,N1,M0,M1,K0,K1;*/
 +    int NG,MG,KG;
 +  /*int P[2];*/
 +    int coor[2];
 +    int nthreads;
 +}; 
 +
 +typedef struct fft5d_plan_t *fft5d_plan;
 +
 +void fft5d_execute(fft5d_plan plan,int thread,fft5d_time times);
 +fft5d_plan fft5d_plan_3d(int N, int M, int K, MPI_Comm comm[2], int flags, t_complex** lin, t_complex** lin2, t_complex** lout2, t_complex** lout3, int nthreads);
 +void fft5d_local_size(fft5d_plan plan,int* N1,int* M0,int* K0,int* K1,int** coor);
 +void fft5d_destroy(fft5d_plan plan);
 +fft5d_plan fft5d_plan_3d_cart(int N, int M, int K, MPI_Comm comm, int P0, int flags, t_complex** lin, t_complex** lin2, t_complex** lout2, t_complex** lout3, int nthreads);
 +void fft5d_compare_data(const t_complex* lin, const t_complex* in, fft5d_plan plan, int bothLocal, int normarlize);
 +
 +#ifdef __cplusplus
 +}
 +#endif
 +#endif /*FFTLIB_H_*/
 +
index 934a8d939c922b27f819439794b603ee93599d87,0000000000000000000000000000000000000000..8ed1a583a0c5ebc7c6febd83816b4d57c36691eb
mode 100644,000000..100644
--- /dev/null
@@@ -1,2755 -1,0 +1,2773 @@@
-     snew_aligned(nbl->table_elec.data,nbl->table_elec.stride*(nbl->table_elec.n+1),16);
 +/* -*- mode: c; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4; c-file-style: "stroustrup"; -*-
 + *
 + * 
 + *                This source code is part of
 + * 
 + *                 G   R   O   M   A   C   S
 + * 
 + *          GROningen MAchine for Chemical Simulations
 + * 
 + *                        VERSION 3.2.0
 + * Written by David van der Spoel, Erik Lindahl, Berk Hess, and others.
 + * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 + * Copyright (c) 2001-2004, The GROMACS development team,
 + * check out http://www.gromacs.org for more information.
 +
 + * This program is free software; you can redistribute it and/or
 + * modify it under the terms of the GNU General Public License
 + * as published by the Free Software Foundation; either version 2
 + * of the License, or (at your option) any later version.
 + * 
 + * If you want to redistribute modifications, please consider that
 + * scientific software is very special. Version control is crucial -
 + * bugs must be traceable. We will be happy to consider code for
 + * inclusion in the official distribution, but derived work must not
 + * be called official GROMACS. Details are found in the README & COPYING
 + * files - if they are missing, get the official version at www.gromacs.org.
 + * 
 + * To help us fund GROMACS development, we humbly ask that you cite
 + * the papers on the package - you can find them in the top README file.
 + *
 + * For more info, check our website at http://www.gromacs.org
 + * 
 + * And Hey:
 + * GROwing Monsters And Cloning Shrimps
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +#include <string.h>
 +#include <assert.h>
 +#include "sysstuff.h"
 +#include "typedefs.h"
 +#include "vec.h"
 +#include "maths.h"
 +#include "macros.h"
 +#include "smalloc.h"
 +#include "macros.h"
 +#include "gmx_fatal.h"
 +#include "gmx_fatal_collective.h"
 +#include "physics.h"
 +#include "force.h"
 +#include "tables.h"
 +#include "nonbonded.h"
 +#include "invblock.h"
 +#include "names.h"
 +#include "network.h"
 +#include "pbc.h"
 +#include "ns.h"
 +#include "mshift.h"
 +#include "txtdump.h"
 +#include "coulomb.h"
 +#include "md_support.h"
 +#include "md_logging.h"
 +#include "domdec.h"
 +#include "partdec.h"
 +#include "qmmm.h"
 +#include "copyrite.h"
 +#include "mtop_util.h"
 +#include "nbnxn_search.h"
 +#include "nbnxn_atomdata.h"
 +#include "nbnxn_consts.h"
 +#include "statutil.h"
 +#include "gmx_omp_nthreads.h"
 +
 +#ifdef _MSC_VER
 +/* MSVC definition for __cpuid() */
 +#include <intrin.h>
 +#endif
 +
 +#include "types/nbnxn_cuda_types_ext.h"
 +#include "gpu_utils.h"
 +#include "nbnxn_cuda_data_mgmt.h"
 +#include "pmalloc_cuda.h"
 +
 +t_forcerec *mk_forcerec(void)
 +{
 +  t_forcerec *fr;
 +  
 +  snew(fr,1);
 +  
 +  return fr;
 +}
 +
 +#ifdef DEBUG
 +static void pr_nbfp(FILE *fp,real *nbfp,gmx_bool bBHAM,int atnr)
 +{
 +  int i,j;
 +  
 +  for(i=0; (i<atnr); i++) {
 +    for(j=0; (j<atnr); j++) {
 +      fprintf(fp,"%2d - %2d",i,j);
 +      if (bBHAM)
 +      fprintf(fp,"  a=%10g, b=%10g, c=%10g\n",BHAMA(nbfp,atnr,i,j),
 +              BHAMB(nbfp,atnr,i,j),BHAMC(nbfp,atnr,i,j)/6.0);
 +      else
 +      fprintf(fp,"  c6=%10g, c12=%10g\n",C6(nbfp,atnr,i,j)/6.0,
 +            C12(nbfp,atnr,i,j)/12.0);
 +    }
 +  }
 +}
 +#endif
 +
 +static real *mk_nbfp(const gmx_ffparams_t *idef,gmx_bool bBHAM)
 +{
 +  real *nbfp;
 +  int  i,j,k,atnr;
 +  
 +  atnr=idef->atnr;
 +  if (bBHAM) {
 +    snew(nbfp,3*atnr*atnr);
 +    for(i=k=0; (i<atnr); i++) {
 +      for(j=0; (j<atnr); j++,k++) {
 +          BHAMA(nbfp,atnr,i,j) = idef->iparams[k].bham.a;
 +          BHAMB(nbfp,atnr,i,j) = idef->iparams[k].bham.b;
 +          /* nbfp now includes the 6.0 derivative prefactor */
 +          BHAMC(nbfp,atnr,i,j) = idef->iparams[k].bham.c*6.0;
 +      }
 +    }
 +  }
 +  else {
 +    snew(nbfp,2*atnr*atnr);
 +    for(i=k=0; (i<atnr); i++) {
 +      for(j=0; (j<atnr); j++,k++) {
 +          /* nbfp now includes the 6.0/12.0 derivative prefactors */
 +          C6(nbfp,atnr,i,j)   = idef->iparams[k].lj.c6*6.0;
 +          C12(nbfp,atnr,i,j)  = idef->iparams[k].lj.c12*12.0;
 +      }
 +    }
 +  }
 +
 +  return nbfp;
 +}
 +
 +/* This routine sets fr->solvent_opt to the most common solvent in the 
 + * system, e.g. esolSPC or esolTIP4P. It will also mark each charge group in 
 + * the fr->solvent_type array with the correct type (or esolNO).
 + *
 + * Charge groups that fulfill the conditions but are not identical to the
 + * most common one will be marked as esolNO in the solvent_type array. 
 + *
 + * TIP3p is identical to SPC for these purposes, so we call it
 + * SPC in the arrays (Apologies to Bill Jorgensen ;-)
 + * 
 + * NOTE: QM particle should not
 + * become an optimized solvent. Not even if there is only one charge
 + * group in the Qm 
 + */
 +
 +typedef struct 
 +{
 +    int    model;          
 +    int    count;
 +    int    vdwtype[4];
 +    real   charge[4];
 +} solvent_parameters_t;
 +
 +static void
 +check_solvent_cg(const gmx_moltype_t   *molt,
 +                 int                   cg0,
 +                 int                   nmol,
 +                 const unsigned char   *qm_grpnr,
 +                 const t_grps          *qm_grps,
 +                 t_forcerec *          fr,
 +                 int                   *n_solvent_parameters,
 +                 solvent_parameters_t  **solvent_parameters_p,
 +                 int                   cginfo,
 +                 int                   *cg_sp)
 +{
 +    const t_blocka *  excl;
 +    t_atom            *atom;
 +    int               j,k;
 +    int               j0,j1,nj;
 +    gmx_bool              perturbed;
 +    gmx_bool              has_vdw[4];
 +    gmx_bool              match;
 +    real              tmp_charge[4];
 +    int               tmp_vdwtype[4];
 +    int               tjA;
 +    gmx_bool              qm;
 +    solvent_parameters_t *solvent_parameters;
 +
 +    /* We use a list with parameters for each solvent type. 
 +     * Every time we discover a new molecule that fulfills the basic 
 +     * conditions for a solvent we compare with the previous entries
 +     * in these lists. If the parameters are the same we just increment
 +     * the counter for that type, and otherwise we create a new type
 +     * based on the current molecule.
 +     *
 +     * Once we've finished going through all molecules we check which
 +     * solvent is most common, and mark all those molecules while we
 +     * clear the flag on all others.
 +     */   
 +
 +    solvent_parameters = *solvent_parameters_p;
 +
 +    /* Mark the cg first as non optimized */
 +    *cg_sp = -1;
 +    
 +    /* Check if this cg has no exclusions with atoms in other charge groups
 +     * and all atoms inside the charge group excluded.
 +     * We only have 3 or 4 atom solvent loops.
 +     */
 +    if (GET_CGINFO_EXCL_INTER(cginfo) ||
 +        !GET_CGINFO_EXCL_INTRA(cginfo))
 +    {
 +        return;
 +    }
 +
 +    /* Get the indices of the first atom in this charge group */
 +    j0     = molt->cgs.index[cg0];
 +    j1     = molt->cgs.index[cg0+1];
 +    
 +    /* Number of atoms in our molecule */
 +    nj     = j1 - j0;
 +
 +    if (debug) {
 +        fprintf(debug,
 +                "Moltype '%s': there are %d atoms in this charge group\n",
 +                *molt->name,nj);
 +    }
 +    
 +    /* Check if it could be an SPC (3 atoms) or TIP4p (4) water,
 +     * otherwise skip it.
 +     */
 +    if (nj<3 || nj>4)
 +    {
 +        return;
 +    }
 +    
 +    /* Check if we are doing QM on this group */
 +    qm = FALSE; 
 +    if (qm_grpnr != NULL)
 +    {
 +        for(j=j0 ; j<j1 && !qm; j++)
 +        {
 +            qm = (qm_grpnr[j] < qm_grps->nr - 1);
 +        }
 +    }
 +    /* Cannot use solvent optimization with QM */
 +    if (qm)
 +    {
 +        return;
 +    }
 +    
 +    atom = molt->atoms.atom;
 +
 +    /* Still looks like a solvent, time to check parameters */
 +    
 +    /* If it is perturbed (free energy) we can't use the solvent loops,
 +     * so then we just skip to the next molecule.
 +     */   
 +    perturbed = FALSE; 
 +    
 +    for(j=j0; j<j1 && !perturbed; j++)
 +    {
 +        perturbed = PERTURBED(atom[j]);
 +    }
 +    
 +    if (perturbed)
 +    {
 +        return;
 +    }
 +    
 +    /* Now it's only a question if the VdW and charge parameters 
 +     * are OK. Before doing the check we compare and see if they are 
 +     * identical to a possible previous solvent type.
 +     * First we assign the current types and charges.    
 +     */
 +    for(j=0; j<nj; j++)
 +    {
 +        tmp_vdwtype[j] = atom[j0+j].type;
 +        tmp_charge[j]  = atom[j0+j].q;
 +    } 
 +    
 +    /* Does it match any previous solvent type? */
 +    for(k=0 ; k<*n_solvent_parameters; k++)
 +    {
 +        match = TRUE;
 +        
 +        
 +        /* We can only match SPC with 3 atoms and TIP4p with 4 atoms */
 +        if( (solvent_parameters[k].model==esolSPC   && nj!=3)  ||
 +            (solvent_parameters[k].model==esolTIP4P && nj!=4) )
 +            match = FALSE;
 +        
 +        /* Check that types & charges match for all atoms in molecule */
 +        for(j=0 ; j<nj && match==TRUE; j++)
 +        {                     
 +            if (tmp_vdwtype[j] != solvent_parameters[k].vdwtype[j])
 +            {
 +                match = FALSE;
 +            }
 +            if(tmp_charge[j] != solvent_parameters[k].charge[j])
 +            {
 +                match = FALSE;
 +            }
 +        }
 +        if (match == TRUE)
 +        {
 +            /* Congratulations! We have a matched solvent.
 +             * Flag it with this type for later processing.
 +             */
 +            *cg_sp = k;
 +            solvent_parameters[k].count += nmol;
 +
 +            /* We are done with this charge group */
 +            return;
 +        }
 +    }
 +    
 +    /* If we get here, we have a tentative new solvent type.
 +     * Before we add it we must check that it fulfills the requirements
 +     * of the solvent optimized loops. First determine which atoms have
 +     * VdW interactions.   
 +     */
 +    for(j=0; j<nj; j++) 
 +    {
 +        has_vdw[j] = FALSE;
 +        tjA        = tmp_vdwtype[j];
 +        
 +        /* Go through all other tpes and see if any have non-zero
 +         * VdW parameters when combined with this one.
 +         */   
 +        for(k=0; k<fr->ntype && (has_vdw[j]==FALSE); k++)
 +        {
 +            /* We already checked that the atoms weren't perturbed,
 +             * so we only need to check state A now.
 +             */ 
 +            if (fr->bBHAM) 
 +            {
 +                has_vdw[j] = (has_vdw[j] || 
 +                              (BHAMA(fr->nbfp,fr->ntype,tjA,k) != 0.0) ||
 +                              (BHAMB(fr->nbfp,fr->ntype,tjA,k) != 0.0) ||
 +                              (BHAMC(fr->nbfp,fr->ntype,tjA,k) != 0.0));
 +            }
 +            else
 +            {
 +                /* Standard LJ */
 +                has_vdw[j] = (has_vdw[j] || 
 +                              (C6(fr->nbfp,fr->ntype,tjA,k)  != 0.0) ||
 +                              (C12(fr->nbfp,fr->ntype,tjA,k) != 0.0));
 +            }
 +        }
 +    }
 +    
 +    /* Now we know all we need to make the final check and assignment. */
 +    if (nj == 3)
 +    {
 +        /* So, is it an SPC?
 +         * For this we require thatn all atoms have charge, 
 +         * the charges on atom 2 & 3 should be the same, and only
 +         * atom 1 might have VdW.
 +         */
 +        if (has_vdw[1] == FALSE &&
 +            has_vdw[2] == FALSE &&
 +            tmp_charge[0]  != 0 &&
 +            tmp_charge[1]  != 0 &&
 +            tmp_charge[2]  == tmp_charge[1])
 +        {
 +            srenew(solvent_parameters,*n_solvent_parameters+1);
 +            solvent_parameters[*n_solvent_parameters].model = esolSPC;
 +            solvent_parameters[*n_solvent_parameters].count = nmol;
 +            for(k=0;k<3;k++)
 +            {
 +                solvent_parameters[*n_solvent_parameters].vdwtype[k] = tmp_vdwtype[k];
 +                solvent_parameters[*n_solvent_parameters].charge[k]  = tmp_charge[k];
 +            }
 +
 +            *cg_sp = *n_solvent_parameters;
 +            (*n_solvent_parameters)++;
 +        }
 +    }
 +    else if (nj==4)
 +    {
 +        /* Or could it be a TIP4P?
 +         * For this we require thatn atoms 2,3,4 have charge, but not atom 1. 
 +         * Only atom 1 mght have VdW.
 +         */
 +        if(has_vdw[1] == FALSE &&
 +           has_vdw[2] == FALSE &&
 +           has_vdw[3] == FALSE &&
 +           tmp_charge[0]  == 0 &&
 +           tmp_charge[1]  != 0 &&
 +           tmp_charge[2]  == tmp_charge[1] &&
 +           tmp_charge[3]  != 0)
 +        {
 +            srenew(solvent_parameters,*n_solvent_parameters+1);
 +            solvent_parameters[*n_solvent_parameters].model = esolTIP4P;
 +            solvent_parameters[*n_solvent_parameters].count = nmol;
 +            for(k=0;k<4;k++)
 +            {
 +                solvent_parameters[*n_solvent_parameters].vdwtype[k] = tmp_vdwtype[k];
 +                solvent_parameters[*n_solvent_parameters].charge[k]  = tmp_charge[k];
 +            }
 +            
 +            *cg_sp = *n_solvent_parameters;
 +            (*n_solvent_parameters)++;
 +        }
 +    }
 +
 +    *solvent_parameters_p = solvent_parameters;
 +}
 +
 +static void
 +check_solvent(FILE *                fp,
 +              const gmx_mtop_t *    mtop,
 +              t_forcerec *          fr,
 +              cginfo_mb_t           *cginfo_mb)
 +{
 +    const t_block *   cgs;
 +    const t_block *   mols;
 +    const gmx_moltype_t *molt;
 +    int               mb,mol,cg_mol,at_offset,cg_offset,am,cgm,i,nmol_ch,nmol;
 +    int               n_solvent_parameters;
 +    solvent_parameters_t *solvent_parameters;
 +    int               **cg_sp;
 +    int               bestsp,bestsol;
 +
 +    if (debug)
 +    {
 +        fprintf(debug,"Going to determine what solvent types we have.\n");
 +    }
 +
 +    mols = &mtop->mols;
 +
 +    n_solvent_parameters = 0;
 +    solvent_parameters = NULL;
 +    /* Allocate temporary array for solvent type */
 +    snew(cg_sp,mtop->nmolblock);
 +
 +    cg_offset = 0;
 +    at_offset = 0;
 +    for(mb=0; mb<mtop->nmolblock; mb++)
 +    {
 +        molt = &mtop->moltype[mtop->molblock[mb].type];
 +        cgs  = &molt->cgs;
 +        /* Here we have to loop over all individual molecules
 +         * because we need to check for QMMM particles.
 +         */
 +        snew(cg_sp[mb],cginfo_mb[mb].cg_mod);
 +        nmol_ch = cginfo_mb[mb].cg_mod/cgs->nr;
 +        nmol    = mtop->molblock[mb].nmol/nmol_ch;
 +        for(mol=0; mol<nmol_ch; mol++)
 +        {
 +            cgm = mol*cgs->nr;
 +            am  = mol*cgs->index[cgs->nr];
 +            for(cg_mol=0; cg_mol<cgs->nr; cg_mol++)
 +            {
 +                check_solvent_cg(molt,cg_mol,nmol,
 +                                 mtop->groups.grpnr[egcQMMM] ?
 +                                 mtop->groups.grpnr[egcQMMM]+at_offset+am : 0,
 +                                 &mtop->groups.grps[egcQMMM],
 +                                 fr,
 +                                 &n_solvent_parameters,&solvent_parameters,
 +                                 cginfo_mb[mb].cginfo[cgm+cg_mol],
 +                                 &cg_sp[mb][cgm+cg_mol]);
 +            }
 +        }
 +        cg_offset += cgs->nr;
 +        at_offset += cgs->index[cgs->nr];
 +    }
 +
 +    /* Puh! We finished going through all charge groups.
 +     * Now find the most common solvent model.
 +     */   
 +    
 +    /* Most common solvent this far */
 +    bestsp = -2;
 +    for(i=0;i<n_solvent_parameters;i++)
 +    {
 +        if (bestsp == -2 ||
 +            solvent_parameters[i].count > solvent_parameters[bestsp].count)
 +        {
 +            bestsp = i;
 +        }
 +    }
 +    
 +    if (bestsp >= 0)
 +    {
 +        bestsol = solvent_parameters[bestsp].model;
 +    }
 +    else
 +    {
 +        bestsol = esolNO;
 +    }
 +    
 +#ifdef DISABLE_WATER_NLIST
 +      bestsol = esolNO;
 +#endif
 +
 +    fr->nWatMol = 0;
 +    for(mb=0; mb<mtop->nmolblock; mb++)
 +    {
 +        cgs = &mtop->moltype[mtop->molblock[mb].type].cgs;
 +        nmol = (mtop->molblock[mb].nmol*cgs->nr)/cginfo_mb[mb].cg_mod;
 +        for(i=0; i<cginfo_mb[mb].cg_mod; i++)
 +        {
 +            if (cg_sp[mb][i] == bestsp)
 +            {
 +                SET_CGINFO_SOLOPT(cginfo_mb[mb].cginfo[i],bestsol);
 +                fr->nWatMol += nmol;
 +            }
 +            else
 +            {
 +                SET_CGINFO_SOLOPT(cginfo_mb[mb].cginfo[i],esolNO);
 +            }
 +        }
 +        sfree(cg_sp[mb]);
 +    }
 +    sfree(cg_sp);
 +    
 +    if (bestsol != esolNO && fp!=NULL)
 +    {
 +        fprintf(fp,"\nEnabling %s-like water optimization for %d molecules.\n\n",
 +                esol_names[bestsol],
 +                solvent_parameters[bestsp].count);
 +    }
 +
 +    sfree(solvent_parameters);
 +    fr->solvent_opt = bestsol;
 +}
 +
 +enum { acNONE=0, acCONSTRAINT, acSETTLE };
 +
 +static cginfo_mb_t *init_cginfo_mb(FILE *fplog,const gmx_mtop_t *mtop,
 +                                   t_forcerec *fr,gmx_bool bNoSolvOpt,
 +                                   gmx_bool *bExcl_IntraCGAll_InterCGNone)
 +{
 +    const t_block *cgs;
 +    const t_blocka *excl;
 +    const gmx_moltype_t *molt;
 +    const gmx_molblock_t *molb;
 +    cginfo_mb_t *cginfo_mb;
 +    gmx_bool *type_VDW;
 +    int  *cginfo;
 +    int  cg_offset,a_offset,cgm,am;
 +    int  mb,m,ncg_tot,cg,a0,a1,gid,ai,j,aj,excl_nalloc;
 +    int  *a_con;
 +    int  ftype;
 +    int  ia;
 +    gmx_bool bId,*bExcl,bExclIntraAll,bExclInter,bHaveVDW,bHaveQ;
 +
 +    ncg_tot = ncg_mtop(mtop);
 +    snew(cginfo_mb,mtop->nmolblock);
 +
 +    snew(type_VDW,fr->ntype);
 +    for(ai=0; ai<fr->ntype; ai++)
 +    {
 +        type_VDW[ai] = FALSE;
 +        for(j=0; j<fr->ntype; j++)
 +        {
 +            type_VDW[ai] = type_VDW[ai] ||
 +                fr->bBHAM ||
 +                C6(fr->nbfp,fr->ntype,ai,j) != 0 ||
 +                C12(fr->nbfp,fr->ntype,ai,j) != 0;
 +        }
 +    }
 +
 +    *bExcl_IntraCGAll_InterCGNone = TRUE;
 +
 +    excl_nalloc = 10;
 +    snew(bExcl,excl_nalloc);
 +    cg_offset = 0;
 +    a_offset  = 0;
 +    for(mb=0; mb<mtop->nmolblock; mb++)
 +    {
 +        molb = &mtop->molblock[mb];
 +        molt = &mtop->moltype[molb->type];
 +        cgs  = &molt->cgs;
 +        excl = &molt->excls;
 +
 +        /* Check if the cginfo is identical for all molecules in this block.
 +         * If so, we only need an array of the size of one molecule.
 +         * Otherwise we make an array of #mol times #cgs per molecule.
 +         */
 +        bId = TRUE;
 +        am = 0;
 +        for(m=0; m<molb->nmol; m++)
 +        {
 +            am = m*cgs->index[cgs->nr];
 +            for(cg=0; cg<cgs->nr; cg++)
 +            {
 +                a0 = cgs->index[cg];
 +                a1 = cgs->index[cg+1];
 +                if (ggrpnr(&mtop->groups,egcENER,a_offset+am+a0) !=
 +                    ggrpnr(&mtop->groups,egcENER,a_offset   +a0))
 +                {
 +                    bId = FALSE;
 +                }
 +                if (mtop->groups.grpnr[egcQMMM] != NULL)
 +                {
 +                    for(ai=a0; ai<a1; ai++)
 +                    {
 +                        if (mtop->groups.grpnr[egcQMMM][a_offset+am+ai] !=
 +                            mtop->groups.grpnr[egcQMMM][a_offset   +ai])
 +                        {
 +                            bId = FALSE;
 +                        }
 +                    }
 +                }
 +            }
 +        }
 +
 +        cginfo_mb[mb].cg_start = cg_offset;
 +        cginfo_mb[mb].cg_end   = cg_offset + molb->nmol*cgs->nr;
 +        cginfo_mb[mb].cg_mod   = (bId ? 1 : molb->nmol)*cgs->nr;
 +        snew(cginfo_mb[mb].cginfo,cginfo_mb[mb].cg_mod);
 +        cginfo = cginfo_mb[mb].cginfo;
 +
 +        /* Set constraints flags for constrained atoms */
 +        snew(a_con,molt->atoms.nr);
 +        for(ftype=0; ftype<F_NRE; ftype++)
 +        {
 +            if (interaction_function[ftype].flags & IF_CONSTRAINT)
 +            {
 +                int nral;
 +
 +                nral = NRAL(ftype);
 +                for(ia=0; ia<molt->ilist[ftype].nr; ia+=1+nral)
 +                {
 +                    int a;
 +
 +                    for(a=0; a<nral; a++)
 +                    {
 +                        a_con[molt->ilist[ftype].iatoms[ia+1+a]] =
 +                            (ftype == F_SETTLE ? acSETTLE : acCONSTRAINT);
 +                    }
 +                }
 +            }
 +        }
 +
 +        for(m=0; m<(bId ? 1 : molb->nmol); m++)
 +        {
 +            cgm = m*cgs->nr;
 +            am  = m*cgs->index[cgs->nr];
 +            for(cg=0; cg<cgs->nr; cg++)
 +            {
 +                a0 = cgs->index[cg];
 +                a1 = cgs->index[cg+1];
 +
 +                /* Store the energy group in cginfo */
 +                gid = ggrpnr(&mtop->groups,egcENER,a_offset+am+a0);
 +                SET_CGINFO_GID(cginfo[cgm+cg],gid);
 +                
 +                /* Check the intra/inter charge group exclusions */
 +                if (a1-a0 > excl_nalloc) {
 +                    excl_nalloc = a1 - a0;
 +                    srenew(bExcl,excl_nalloc);
 +                }
 +                /* bExclIntraAll: all intra cg interactions excluded
 +                 * bExclInter:    any inter cg interactions excluded
 +                 */
 +                bExclIntraAll = TRUE;
 +                bExclInter    = FALSE;
 +                bHaveVDW      = FALSE;
 +                bHaveQ        = FALSE;
 +                for(ai=a0; ai<a1; ai++)
 +                {
 +                    /* Check VDW and electrostatic interactions */
 +                    bHaveVDW = bHaveVDW || (type_VDW[molt->atoms.atom[ai].type] ||
 +                                            type_VDW[molt->atoms.atom[ai].typeB]);
 +                    bHaveQ  = bHaveQ    || (molt->atoms.atom[ai].q != 0 ||
 +                                            molt->atoms.atom[ai].qB != 0);
 +
 +                    /* Clear the exclusion list for atom ai */
 +                    for(aj=a0; aj<a1; aj++)
 +                    {
 +                        bExcl[aj-a0] = FALSE;
 +                    }
 +                    /* Loop over all the exclusions of atom ai */
 +                    for(j=excl->index[ai]; j<excl->index[ai+1]; j++)
 +                    {
 +                        aj = excl->a[j];
 +                        if (aj < a0 || aj >= a1)
 +                        {
 +                            bExclInter = TRUE;
 +                        }
 +                        else
 +                        {
 +                            bExcl[aj-a0] = TRUE;
 +                        }
 +                    }
 +                    /* Check if ai excludes a0 to a1 */
 +                    for(aj=a0; aj<a1; aj++)
 +                    {
 +                        if (!bExcl[aj-a0])
 +                        {
 +                            bExclIntraAll = FALSE;
 +                        }
 +                    }
 +
 +                    switch (a_con[ai])
 +                    {
 +                    case acCONSTRAINT:
 +                        SET_CGINFO_CONSTR(cginfo[cgm+cg]);
 +                        break;
 +                    case acSETTLE:
 +                        SET_CGINFO_SETTLE(cginfo[cgm+cg]);
 +                        break;
 +                    default:
 +                        break;
 +                    }
 +                }
 +                if (bExclIntraAll)
 +                {
 +                    SET_CGINFO_EXCL_INTRA(cginfo[cgm+cg]);
 +                }
 +                if (bExclInter)
 +                {
 +                    SET_CGINFO_EXCL_INTER(cginfo[cgm+cg]);
 +                }
 +                if (a1 - a0 > MAX_CHARGEGROUP_SIZE)
 +                {
 +                    /* The size in cginfo is currently only read with DD */
 +                    gmx_fatal(FARGS,"A charge group has size %d which is larger than the limit of %d atoms",a1-a0,MAX_CHARGEGROUP_SIZE);
 +                }
 +                if (bHaveVDW)
 +                {
 +                    SET_CGINFO_HAS_VDW(cginfo[cgm+cg]);
 +                }
 +                if (bHaveQ)
 +                {
 +                    SET_CGINFO_HAS_Q(cginfo[cgm+cg]);
 +                }
 +                /* Store the charge group size */
 +                SET_CGINFO_NATOMS(cginfo[cgm+cg],a1-a0);
 +
 +                if (!bExclIntraAll || bExclInter)
 +                {
 +                    *bExcl_IntraCGAll_InterCGNone = FALSE;
 +                }
 +            }
 +        }
 +
 +        sfree(a_con);
 +
 +        cg_offset += molb->nmol*cgs->nr;
 +        a_offset  += molb->nmol*cgs->index[cgs->nr];
 +    }
 +    sfree(bExcl);
 +    
 +    /* the solvent optimizer is called after the QM is initialized,
 +     * because we don't want to have the QM subsystemto become an
 +     * optimized solvent
 +     */
 +
 +    check_solvent(fplog,mtop,fr,cginfo_mb);
 +    
 +    if (getenv("GMX_NO_SOLV_OPT"))
 +    {
 +        if (fplog)
 +        {
 +            fprintf(fplog,"Found environment variable GMX_NO_SOLV_OPT.\n"
 +                    "Disabling all solvent optimization\n");
 +        }
 +        fr->solvent_opt = esolNO;
 +    }
 +    if (bNoSolvOpt)
 +    {
 +        fr->solvent_opt = esolNO;
 +    }
 +    if (!fr->solvent_opt)
 +    {
 +        for(mb=0; mb<mtop->nmolblock; mb++)
 +        {
 +            for(cg=0; cg<cginfo_mb[mb].cg_mod; cg++)
 +            {
 +                SET_CGINFO_SOLOPT(cginfo_mb[mb].cginfo[cg],esolNO);
 +            }
 +        }
 +    }
 +    
 +    return cginfo_mb;
 +}
 +
 +static int *cginfo_expand(int nmb,cginfo_mb_t *cgi_mb)
 +{
 +    int ncg,mb,cg;
 +    int *cginfo;
 +
 +    ncg = cgi_mb[nmb-1].cg_end;
 +    snew(cginfo,ncg);
 +    mb = 0;
 +    for(cg=0; cg<ncg; cg++)
 +    {
 +        while (cg >= cgi_mb[mb].cg_end)
 +        {
 +            mb++;
 +        }
 +        cginfo[cg] =
 +            cgi_mb[mb].cginfo[(cg - cgi_mb[mb].cg_start) % cgi_mb[mb].cg_mod];
 +    }
 +
 +    return cginfo;
 +}
 +
 +static void set_chargesum(FILE *log,t_forcerec *fr,const gmx_mtop_t *mtop)
 +{
 +    double qsum,q2sum,q;
 +    int    mb,nmol,i;
 +    const t_atoms *atoms;
 +    
 +    qsum  = 0;
 +    q2sum = 0;
 +    for(mb=0; mb<mtop->nmolblock; mb++)
 +    {
 +        nmol  = mtop->molblock[mb].nmol;
 +        atoms = &mtop->moltype[mtop->molblock[mb].type].atoms;
 +        for(i=0; i<atoms->nr; i++)
 +        {
 +            q = atoms->atom[i].q;
 +            qsum  += nmol*q;
 +            q2sum += nmol*q*q;
 +        }
 +    }
 +    fr->qsum[0]  = qsum;
 +    fr->q2sum[0] = q2sum;
 +    if (fr->efep != efepNO)
 +    {
 +        qsum  = 0;
 +        q2sum = 0;
 +        for(mb=0; mb<mtop->nmolblock; mb++)
 +        {
 +            nmol  = mtop->molblock[mb].nmol;
 +            atoms = &mtop->moltype[mtop->molblock[mb].type].atoms;
 +            for(i=0; i<atoms->nr; i++)
 +            {
 +                q = atoms->atom[i].qB;
 +                qsum  += nmol*q;
 +                q2sum += nmol*q*q;
 +            }
 +            fr->qsum[1]  = qsum;
 +            fr->q2sum[1] = q2sum;
 +        }
 +    }
 +    else
 +    {
 +        fr->qsum[1]  = fr->qsum[0];
 +        fr->q2sum[1] = fr->q2sum[0];
 +    }
 +    if (log) {
 +        if (fr->efep == efepNO)
 +            fprintf(log,"System total charge: %.3f\n",fr->qsum[0]);
 +        else
 +            fprintf(log,"System total charge, top. A: %.3f top. B: %.3f\n",
 +                    fr->qsum[0],fr->qsum[1]);
 +    }
 +}
 +
 +void update_forcerec(FILE *log,t_forcerec *fr,matrix box)
 +{
 +    if (fr->eeltype == eelGRF)
 +    {
 +        calc_rffac(NULL,fr->eeltype,fr->epsilon_r,fr->epsilon_rf,
 +                   fr->rcoulomb,fr->temp,fr->zsquare,box,
 +                   &fr->kappa,&fr->k_rf,&fr->c_rf);
 +    }
 +}
 +
 +void set_avcsixtwelve(FILE *fplog,t_forcerec *fr,const gmx_mtop_t *mtop)
 +{
 +    const t_atoms *atoms,*atoms_tpi;
 +    const t_blocka *excl;
 +    int    mb,nmol,nmolc,i,j,tpi,tpj,j1,j2,k,n,nexcl,q;
 +#if (defined SIZEOF_LONG_LONG_INT) && (SIZEOF_LONG_LONG_INT >= 8)    
 +    long long int  npair,npair_ij,tmpi,tmpj;
 +#else
 +    double npair, npair_ij,tmpi,tmpj;
 +#endif
 +    double csix,ctwelve;
 +    int    ntp,*typecount;
 +    gmx_bool   bBHAM;
 +    real   *nbfp;
 +
 +    ntp = fr->ntype;
 +    bBHAM = fr->bBHAM;
 +    nbfp = fr->nbfp;
 +    
 +    for(q=0; q<(fr->efep==efepNO ? 1 : 2); q++) {
 +        csix = 0;
 +        ctwelve = 0;
 +        npair = 0;
 +        nexcl = 0;
 +        if (!fr->n_tpi) {
 +            /* Count the types so we avoid natoms^2 operations */
 +            snew(typecount,ntp);
 +            for(mb=0; mb<mtop->nmolblock; mb++) {
 +                nmol  = mtop->molblock[mb].nmol;
 +                atoms = &mtop->moltype[mtop->molblock[mb].type].atoms;
 +                for(i=0; i<atoms->nr; i++) {
 +                    if (q == 0)
 +                    {
 +                        tpi = atoms->atom[i].type;
 +                    }
 +                    else
 +                    {
 +                        tpi = atoms->atom[i].typeB;
 +                    }
 +                    typecount[tpi] += nmol;
 +                }
 +            }
 +            for(tpi=0; tpi<ntp; tpi++) {
 +                for(tpj=tpi; tpj<ntp; tpj++) {
 +                    tmpi = typecount[tpi];
 +                    tmpj = typecount[tpj];
 +                    if (tpi != tpj)
 +                    {
 +                        npair_ij = tmpi*tmpj;
 +                    }
 +                    else
 +                    {
 +                        npair_ij = tmpi*(tmpi - 1)/2;
 +                    }
 +                    if (bBHAM) {
 +                        /* nbfp now includes the 6.0 derivative prefactor */
 +                        csix    += npair_ij*BHAMC(nbfp,ntp,tpi,tpj)/6.0;
 +                    } else {
 +                        /* nbfp now includes the 6.0/12.0 derivative prefactors */
 +                        csix    += npair_ij*   C6(nbfp,ntp,tpi,tpj)/6.0;
 +                        ctwelve += npair_ij*  C12(nbfp,ntp,tpi,tpj)/12.0;
 +                    }
 +                    npair += npair_ij;
 +                }
 +            }
 +            sfree(typecount);
 +            /* Subtract the excluded pairs.
 +             * The main reason for subtracting exclusions is that in some cases
 +             * some combinations might never occur and the parameters could have
 +             * any value. These unused values should not influence the dispersion
 +             * correction.
 +             */
 +            for(mb=0; mb<mtop->nmolblock; mb++) {
 +                nmol  = mtop->molblock[mb].nmol;
 +                atoms = &mtop->moltype[mtop->molblock[mb].type].atoms;
 +                excl  = &mtop->moltype[mtop->molblock[mb].type].excls;
 +                for(i=0; (i<atoms->nr); i++) {
 +                    if (q == 0)
 +                    {
 +                        tpi = atoms->atom[i].type;
 +                    }
 +                    else
 +                    {
 +                        tpi = atoms->atom[i].typeB;
 +                    }
 +                    j1  = excl->index[i];
 +                    j2  = excl->index[i+1];
 +                    for(j=j1; j<j2; j++) {
 +                        k = excl->a[j];
 +                        if (k > i)
 +                        {
 +                            if (q == 0)
 +                            {
 +                                tpj = atoms->atom[k].type;
 +                            }
 +                            else
 +                            {
 +                                tpj = atoms->atom[k].typeB;
 +                            }
 +                            if (bBHAM) {
 +                                /* nbfp now includes the 6.0 derivative prefactor */
 +                               csix -= nmol*BHAMC(nbfp,ntp,tpi,tpj)/6.0;
 +                            } else {
 +                                /* nbfp now includes the 6.0/12.0 derivative prefactors */
 +                                csix    -= nmol*C6 (nbfp,ntp,tpi,tpj)/6.0;
 +                                ctwelve -= nmol*C12(nbfp,ntp,tpi,tpj)/12.0;
 +                            }
 +                            nexcl += nmol;
 +                        }
 +                    }
 +                }
 +            }
 +        } else {
 +            /* Only correct for the interaction of the test particle
 +             * with the rest of the system.
 +             */
 +            atoms_tpi =
 +                &mtop->moltype[mtop->molblock[mtop->nmolblock-1].type].atoms;
 +
 +            npair = 0;
 +            for(mb=0; mb<mtop->nmolblock; mb++) {
 +                nmol  = mtop->molblock[mb].nmol;
 +                atoms = &mtop->moltype[mtop->molblock[mb].type].atoms;
 +                for(j=0; j<atoms->nr; j++) {
 +                    nmolc = nmol;
 +                    /* Remove the interaction of the test charge group
 +                     * with itself.
 +                     */
 +                    if (mb == mtop->nmolblock-1)
 +                    {
 +                        nmolc--;
 +                        
 +                        if (mb == 0 && nmol == 1)
 +                        {
 +                            gmx_fatal(FARGS,"Old format tpr with TPI, please generate a new tpr file");
 +                        }
 +                    }
 +                    if (q == 0)
 +                    {
 +                        tpj = atoms->atom[j].type;
 +                    }
 +                    else
 +                    {
 +                        tpj = atoms->atom[j].typeB;
 +                    }
 +                    for(i=0; i<fr->n_tpi; i++)
 +                    {
 +                        if (q == 0)
 +                        {
 +                            tpi = atoms_tpi->atom[i].type;
 +                        }
 +                        else
 +                        {
 +                            tpi = atoms_tpi->atom[i].typeB;
 +                        }
 +                        if (bBHAM)
 +                        {
 +                            /* nbfp now includes the 6.0 derivative prefactor */
 +                            csix    += nmolc*BHAMC(nbfp,ntp,tpi,tpj)/6.0;
 +                        }
 +                        else
 +                        {
 +                            /* nbfp now includes the 6.0/12.0 derivative prefactors */
 +                            csix    += nmolc*C6 (nbfp,ntp,tpi,tpj)/6.0;
 +                            ctwelve += nmolc*C12(nbfp,ntp,tpi,tpj)/12.0;
 +                        }
 +                        npair += nmolc;
 +                    }
 +                }
 +            }
 +        }
 +        if (npair - nexcl <= 0 && fplog) {
 +            fprintf(fplog,"\nWARNING: There are no atom pairs for dispersion correction\n\n");
 +            csix     = 0;
 +            ctwelve  = 0;
 +        } else {
 +            csix    /= npair - nexcl;
 +            ctwelve /= npair - nexcl;
 +        }
 +        if (debug) {
 +            fprintf(debug,"Counted %d exclusions\n",nexcl);
 +            fprintf(debug,"Average C6 parameter is: %10g\n",(double)csix);
 +            fprintf(debug,"Average C12 parameter is: %10g\n",(double)ctwelve);
 +        }
 +        fr->avcsix[q]    = csix;
 +        fr->avctwelve[q] = ctwelve;
 +    }
 +    if (fplog != NULL)
 +    {
 +        if (fr->eDispCorr == edispcAllEner ||
 +            fr->eDispCorr == edispcAllEnerPres)
 +        {
 +            fprintf(fplog,"Long Range LJ corr.: <C6> %10.4e, <C12> %10.4e\n",
 +                    fr->avcsix[0],fr->avctwelve[0]);
 +        }
 +        else
 +        {
 +            fprintf(fplog,"Long Range LJ corr.: <C6> %10.4e\n",fr->avcsix[0]);
 +        }
 +    }
 +}
 +
 +
 +static void set_bham_b_max(FILE *fplog,t_forcerec *fr,
 +                           const gmx_mtop_t *mtop)
 +{
 +    const t_atoms *at1,*at2;
 +    int  mt1,mt2,i,j,tpi,tpj,ntypes;
 +    real b,bmin;
 +    real *nbfp;
 +
 +    if (fplog)
 +    {
 +        fprintf(fplog,"Determining largest Buckingham b parameter for table\n");
 +    }
 +    nbfp   = fr->nbfp;
 +    ntypes = fr->ntype;
 +    
 +    bmin           = -1;
 +    fr->bham_b_max = 0;
 +    for(mt1=0; mt1<mtop->nmoltype; mt1++)
 +    {
 +        at1 = &mtop->moltype[mt1].atoms;
 +        for(i=0; (i<at1->nr); i++)
 +        {
 +            tpi = at1->atom[i].type;
 +            if (tpi >= ntypes)
 +                gmx_fatal(FARGS,"Atomtype[%d] = %d, maximum = %d",i,tpi,ntypes);
 +            
 +            for(mt2=mt1; mt2<mtop->nmoltype; mt2++)
 +            {
 +                at2 = &mtop->moltype[mt2].atoms;
 +                for(j=0; (j<at2->nr); j++) {
 +                    tpj = at2->atom[j].type;
 +                    if (tpj >= ntypes)
 +                    {
 +                        gmx_fatal(FARGS,"Atomtype[%d] = %d, maximum = %d",j,tpj,ntypes);
 +                    }
 +                    b = BHAMB(nbfp,ntypes,tpi,tpj);
 +                    if (b > fr->bham_b_max)
 +                    {
 +                        fr->bham_b_max = b;
 +                    }
 +                    if ((b < bmin) || (bmin==-1))
 +                    {
 +                        bmin = b;
 +                    }
 +                }
 +            }
 +        }
 +    }
 +    if (fplog)
 +    {
 +        fprintf(fplog,"Buckingham b parameters, min: %g, max: %g\n",
 +                bmin,fr->bham_b_max);
 +    }
 +}
 +
 +static void make_nbf_tables(FILE *fp,const output_env_t oenv,
 +                            t_forcerec *fr,real rtab,
 +                            const t_commrec *cr,
 +                            const char *tabfn,char *eg1,char *eg2,
 +                            t_nblists *nbl)
 +{
 +    char buf[STRLEN];
 +    int i,j;
 +
 +    if (tabfn == NULL) {
 +        if (debug)
 +            fprintf(debug,"No table file name passed, can not read table, can not do non-bonded interactions\n");
 +        return;
 +    }
 +
 +    sprintf(buf,"%s",tabfn);
 +    if (eg1 && eg2)
 +    /* Append the two energy group names */
 +        sprintf(buf + strlen(tabfn) - strlen(ftp2ext(efXVG)) - 1,"_%s_%s.%s",
 +                eg1,eg2,ftp2ext(efXVG));
 +    nbl->table_elec_vdw = make_tables(fp,oenv,fr,MASTER(cr),buf,rtab,0);
 +    /* Copy the contents of the table to separate coulomb and LJ tables too,
 +     * to improve cache performance.
 +     */
 +    /* For performance reasons we want
 +     * the table data to be aligned to 16-byte. The pointers could be freed
 +     * but currently aren't.
 +     */
 +    nbl->table_elec.interaction = GMX_TABLE_INTERACTION_ELEC;
 +    nbl->table_elec.format = nbl->table_elec_vdw.format;
 +    nbl->table_elec.r = nbl->table_elec_vdw.r;
 +    nbl->table_elec.n = nbl->table_elec_vdw.n;
 +    nbl->table_elec.scale = nbl->table_elec_vdw.scale;
 +    nbl->table_elec.scale_exp = nbl->table_elec_vdw.scale_exp;
 +    nbl->table_elec.formatsize = nbl->table_elec_vdw.formatsize;
 +    nbl->table_elec.ninteractions = 1;
 +    nbl->table_elec.stride = nbl->table_elec.formatsize * nbl->table_elec.ninteractions;
-     snew_aligned(nbl->table_vdw.data,nbl->table_vdw.stride*(nbl->table_vdw.n+1),16);
++    snew_aligned(nbl->table_elec.data,nbl->table_elec.stride*(nbl->table_elec.n+1),32);
 +
 +    nbl->table_vdw.interaction = GMX_TABLE_INTERACTION_VDWREP_VDWDISP;
 +    nbl->table_vdw.format = nbl->table_elec_vdw.format;
 +    nbl->table_vdw.r = nbl->table_elec_vdw.r;
 +    nbl->table_vdw.n = nbl->table_elec_vdw.n;
 +    nbl->table_vdw.scale = nbl->table_elec_vdw.scale;
 +    nbl->table_vdw.scale_exp = nbl->table_elec_vdw.scale_exp;
 +    nbl->table_vdw.formatsize = nbl->table_elec_vdw.formatsize;
 +    nbl->table_vdw.ninteractions = 2;
 +    nbl->table_vdw.stride = nbl->table_vdw.formatsize * nbl->table_vdw.ninteractions;
- #ifdef GMX_NBNXN_SIMD_2XNN
++    snew_aligned(nbl->table_vdw.data,nbl->table_vdw.stride*(nbl->table_vdw.n+1),32);
 +
 +    for(i=0; i<=nbl->table_elec_vdw.n; i++)
 +    {
 +        for(j=0; j<4; j++)
 +            nbl->table_elec.data[4*i+j] = nbl->table_elec_vdw.data[12*i+j];
 +        for(j=0; j<8; j++)
 +            nbl->table_vdw.data[8*i+j] = nbl->table_elec_vdw.data[12*i+4+j];
 +    }
 +}
 +
 +static void count_tables(int ftype1,int ftype2,const gmx_mtop_t *mtop,
 +                         int *ncount,int **count)
 +{
 +    const gmx_moltype_t *molt;
 +    const t_ilist *il;
 +    int mt,ftype,stride,i,j,tabnr;
 +    
 +    for(mt=0; mt<mtop->nmoltype; mt++)
 +    {
 +        molt = &mtop->moltype[mt];
 +        for(ftype=0; ftype<F_NRE; ftype++)
 +        {
 +            if (ftype == ftype1 || ftype == ftype2) {
 +                il = &molt->ilist[ftype];
 +                stride = 1 + NRAL(ftype);
 +                for(i=0; i<il->nr; i+=stride) {
 +                    tabnr = mtop->ffparams.iparams[il->iatoms[i]].tab.table;
 +                    if (tabnr < 0)
 +                        gmx_fatal(FARGS,"A bonded table number is smaller than 0: %d\n",tabnr);
 +                    if (tabnr >= *ncount) {
 +                        srenew(*count,tabnr+1);
 +                        for(j=*ncount; j<tabnr+1; j++)
 +                            (*count)[j] = 0;
 +                        *ncount = tabnr+1;
 +                    }
 +                    (*count)[tabnr]++;
 +                }
 +            }
 +        }
 +    }
 +}
 +
 +static bondedtable_t *make_bonded_tables(FILE *fplog,
 +                                         int ftype1,int ftype2,
 +                                         const gmx_mtop_t *mtop,
 +                                         const char *basefn,const char *tabext)
 +{
 +    int  i,ncount,*count;
 +    char tabfn[STRLEN];
 +    bondedtable_t *tab;
 +    
 +    tab = NULL;
 +    
 +    ncount = 0;
 +    count = NULL;
 +    count_tables(ftype1,ftype2,mtop,&ncount,&count);
 +    
 +    if (ncount > 0) {
 +        snew(tab,ncount);
 +        for(i=0; i<ncount; i++) {
 +            if (count[i] > 0) {
 +                sprintf(tabfn,"%s",basefn);
 +                sprintf(tabfn + strlen(basefn) - strlen(ftp2ext(efXVG)) - 1,"_%s%d.%s",
 +                        tabext,i,ftp2ext(efXVG));
 +                tab[i] = make_bonded_table(fplog,tabfn,NRAL(ftype1)-2);
 +            }
 +        }
 +        sfree(count);
 +    }
 +  
 +    return tab;
 +}
 +
 +void forcerec_set_ranges(t_forcerec *fr,
 +                         int ncg_home,int ncg_force,
 +                         int natoms_force,
 +                         int natoms_force_constr,int natoms_f_novirsum)
 +{
 +    fr->cg0 = 0;
 +    fr->hcg = ncg_home;
 +
 +    /* fr->ncg_force is unused in the standard code,
 +     * but it can be useful for modified code dealing with charge groups.
 +     */
 +    fr->ncg_force           = ncg_force;
 +    fr->natoms_force        = natoms_force;
 +    fr->natoms_force_constr = natoms_force_constr;
 +
 +    if (fr->natoms_force_constr > fr->nalloc_force)
 +    {
 +        fr->nalloc_force = over_alloc_dd(fr->natoms_force_constr);
 +
 +        if (fr->bTwinRange)
 +        {
 +            srenew(fr->f_twin,fr->nalloc_force);
 +        }
 +    }
 +
 +    if (fr->bF_NoVirSum)
 +    {
 +        fr->f_novirsum_n = natoms_f_novirsum;
 +        if (fr->f_novirsum_n > fr->f_novirsum_nalloc)
 +        {
 +            fr->f_novirsum_nalloc = over_alloc_dd(fr->f_novirsum_n);
 +            srenew(fr->f_novirsum_alloc,fr->f_novirsum_nalloc);
 +        }
 +    }
 +    else
 +    {
 +        fr->f_novirsum_n = 0;
 +    }
 +}
 +
 +static real cutoff_inf(real cutoff)
 +{
 +    if (cutoff == 0)
 +    {
 +        cutoff = GMX_CUTOFF_INF;
 +    }
 +
 +    return cutoff;
 +}
 +
 +static void make_adress_tf_tables(FILE *fp,const output_env_t oenv,
 +                            t_forcerec *fr,const t_inputrec *ir,
 +                          const char *tabfn, const gmx_mtop_t *mtop,
 +                            matrix     box)
 +{
 +  char buf[STRLEN];
 +  int i,j;
 +
 +  if (tabfn == NULL) {
 +        gmx_fatal(FARGS,"No thermoforce table file given. Use -tabletf to specify a file\n");
 +    return;
 +  }
 +
 +  snew(fr->atf_tabs, ir->adress->n_tf_grps);
 +
 +  for (i=0; i<ir->adress->n_tf_grps; i++){
 +    j = ir->adress->tf_table_index[i]; /* get energy group index */
 +    sprintf(buf + strlen(tabfn) - strlen(ftp2ext(efXVG)) - 1,"tf_%s.%s",
 +        *(mtop->groups.grpname[mtop->groups.grps[egcENER].nm_ind[j]]) ,ftp2ext(efXVG));
 +    printf("loading tf table for energygrp index %d from %s\n", ir->adress->tf_table_index[j], buf);
 +    fr->atf_tabs[i] = make_atf_table(fp,oenv,fr,buf, box);
 +  }
 +
 +}
 +
 +gmx_bool can_use_allvsall(const t_inputrec *ir, const gmx_mtop_t *mtop,
 +                      gmx_bool bPrintNote,t_commrec *cr,FILE *fp)
 +{
 +    gmx_bool bAllvsAll;
 +
 +    bAllvsAll =
 +        (
 +         ir->rlist==0            &&
 +         ir->rcoulomb==0         &&
 +         ir->rvdw==0             &&
 +         ir->ePBC==epbcNONE      &&
 +         ir->vdwtype==evdwCUT    &&
 +         ir->coulombtype==eelCUT &&
 +         ir->efep==efepNO        &&
 +         (ir->implicit_solvent == eisNO || 
 +          (ir->implicit_solvent==eisGBSA && (ir->gb_algorithm==egbSTILL || 
 +                                             ir->gb_algorithm==egbHCT   || 
 +                                             ir->gb_algorithm==egbOBC))) &&
 +         getenv("GMX_NO_ALLVSALL") == NULL
 +            );
 +    
 +    if (bAllvsAll && ir->opts.ngener > 1)
 +    {
 +        const char *note="NOTE: Can not use all-vs-all force loops, because there are multiple energy monitor groups; you might get significantly higher performance when using only a single energy monitor group.\n";
 +
 +        if (bPrintNote)
 +        {
 +            if (MASTER(cr))
 +            {
 +                fprintf(stderr,"\n%s\n",note);
 +            }
 +            if (fp != NULL)
 +            {
 +                fprintf(fp,"\n%s\n",note);
 +            }
 +        }
 +        bAllvsAll = FALSE;
 +    }
 +
 +    if(bAllvsAll && fp && MASTER(cr))
 +    {
 +        fprintf(fp,"\nUsing accelerated all-vs-all kernels.\n\n");
 +    }
 +    
 +    return bAllvsAll;
 +}
 +
 +
 +static void init_forcerec_f_threads(t_forcerec *fr,int nenergrp)
 +{
 +    int t,i;
 +
 +    /* These thread local data structures are used for bondeds only */
 +    fr->nthreads = gmx_omp_nthreads_get(emntBonded);
 +
 +    if (fr->nthreads > 1)
 +    {
 +        snew(fr->f_t,fr->nthreads);
 +        /* Thread 0 uses the global force and energy arrays */
 +        for(t=1; t<fr->nthreads; t++)
 +        {
 +            fr->f_t[t].f = NULL;
 +            fr->f_t[t].f_nalloc = 0;
 +            snew(fr->f_t[t].fshift,SHIFTS);
 +            fr->f_t[t].grpp.nener = nenergrp*nenergrp;
 +            for(i=0; i<egNR; i++)
 +            {
 +                snew(fr->f_t[t].grpp.ener[i],fr->f_t[t].grpp.nener);
 +            }
 +        }
 +    }
 +}
 +
 +
 +static void pick_nbnxn_kernel_cpu(FILE *fp,
 +                                  const t_commrec *cr,
 +                                  const gmx_cpuid_t cpuid_info,
 +                                  const t_inputrec *ir,
 +                                  int *kernel_type,
 +                                  int *ewald_excl)
 +{
 +    *kernel_type = nbnxnk4x4_PlainC;
 +    *ewald_excl  = ewaldexclTable;
 +
 +#ifdef GMX_NBNXN_SIMD
 +    {
 +#ifdef GMX_NBNXN_SIMD_4XN
 +        *kernel_type = nbnxnk4xN_SIMD_4xN;
 +#endif
 +#ifdef GMX_NBNXN_SIMD_2XNN
 +        /* We expect the 2xNN kernels to be faster in most cases */
 +        *kernel_type = nbnxnk4xN_SIMD_2xNN;
 +#endif
 +
 +#if defined GMX_NBNXN_SIMD_4XN && defined GMX_X86_AVX_256
 +        if (EEL_RF(ir->coulombtype) || ir->coulombtype == eelCUT)
 +        {
 +            /* The raw pair rate of the 4x8 kernel is higher than 2x(4+4),
 +             * 10% with HT, 50% without HT, but extra zeros interactions
 +             * can compensate. As we currently don't detect the actual use
 +             * of HT, switch to 4x8 to avoid a potential performance hit.
 +             */
 +            *kernel_type = nbnxnk4xN_SIMD_4xN;
 +        }
 +#endif
 +        if (getenv("GMX_NBNXN_SIMD_4XN") != NULL)
 +        {
- /* Note that _mm_... intrinsics can be converted to either SSE or AVX
-  * depending on compiler flags.
-  * For gcc we check for __AVX__
-  * At least a check for icc should be added (if there is a macro)
-  */
- static const char *nbk_name[] =
-   { "not set", "plain C 4x4",
- #if !(defined GMX_X86_AVX_256 || defined GMX_X86_AVX128_FMA || defined __AVX__)
++#ifdef GMX_NBNXN_SIMD_4XN
 +            *kernel_type = nbnxnk4xN_SIMD_4xN;
 +#else
 +            gmx_fatal(FARGS,"SIMD 4xN kernels requested, but Gromacs has been compiled without support for these kernels");
 +#endif
 +        }
 +        if (getenv("GMX_NBNXN_SIMD_2XNN") != NULL)
 +        {
 +#ifdef GMX_NBNXN_SIMD_2XNN
 +            *kernel_type = nbnxnk4xN_SIMD_2xNN;
 +#else
 +            gmx_fatal(FARGS,"SIMD 2x(N+N) kernels requested, but Gromacs has been compiled without support for these kernels");
 +#endif
 +        }
 +
 +        /* Analytical Ewald exclusion correction is only an option in the
 +         * x86 SIMD kernel. This is faster in single precision
 +         * on Bulldozer and slightly faster on Sandy Bridge.
 +         */
 +#if (defined GMX_X86_AVX_128_FMA || defined GMX_X86_AVX_256) && !defined GMX_DOUBLE
 +        *ewald_excl = ewaldexclAnalytical;
 +#endif
 +        if (getenv("GMX_NBNXN_EWALD_TABLE") != NULL)
 +        {
 +            *ewald_excl = ewaldexclTable;
 +        }
 +        if (getenv("GMX_NBNXN_EWALD_ANALYTICAL") != NULL)
 +        {
 +            *ewald_excl = ewaldexclAnalytical;
 +        }
 +
 +    }
 +#endif /* GMX_NBNXN_SIMD */
 +}
 +
 +
- #ifndef GMX_DOUBLE
-     "SSE2 4x4",
++const char *lookup_nbnxn_kernel_name(int kernel_type)
++{
++    const char *returnvalue = NULL;
++    switch(kernel_type)
++    {
++    case nbnxnkNotSet: returnvalue = "not set"; break;
++    case nbnxnk4x4_PlainC: returnvalue = "plain C"; break;
++#ifndef GMX_NBNXN_SIMD
++    case nbnxnk4xN_SIMD_4xN: returnvalue = "not available"; break;
++    case nbnxnk4xN_SIMD_2xNN: returnvalue = "not available"; break;
++#else
++#ifdef GMX_X86_SSE2
++#if GMX_NBNXN_SIMD_BITWIDTH == 128
++        /* x86 SIMD intrinsics can be converted to either SSE or AVX depending
++         * on compiler flags. As we use nearly identical intrinsics, using an AVX
++         * compiler flag without an AVX macro effectively results in AVX kernels.
++         * For gcc we check for __AVX__
++         * At least a check for icc should be added (if there is a macro)
++         */
++#if !(defined GMX_X86_AVX_128_FMA || defined __AVX__)
 +#ifndef GMX_X86_SSE4_1
-     "SSE2 4x2",
++    case nbnxnk4xN_SIMD_4xN: returnvalue = "SSE2"; break;
++    case nbnxnk4xN_SIMD_2xNN: returnvalue = "SSE2"; break;
 +#else
- #ifndef GMX_DOUBLE
-     "SSE4.1 4x4",
- #else
-     "SSE4.1 4x2",
++    case nbnxnk4xN_SIMD_4xN: returnvalue = "SSE4.1"; break;
++    case nbnxnk4xN_SIMD_2xNN: returnvalue = "SSE4.1"; break;
 +#endif
 +#else
- #else
- #ifndef GMX_DOUBLE
-     "AVX-128 4x4",
- #else
-     "AVX-128 4x2",
++    case nbnxnk4xN_SIMD_4xN: returnvalue = "AVX-128"; break;
++    case nbnxnk4xN_SIMD_2xNN: returnvalue = "AVX-128"; break;
 +#endif
 +#endif
- #ifndef GMX_DOUBLE
-     "AVX-256 4x8",
- #else
-     "AVX-256 4x4",
++#if GMX_NBNXN_SIMD_BITWIDTH == 256
++    case nbnxnk4xN_SIMD_4xN: returnvalue = "AVX-256"; break;
++    case nbnxnk4xN_SIMD_2xNN: returnvalue = "AVX-256"; break;
 +#endif
++#else /* not GMX_X86_SSE2 */
++    case nbnxnk4xN_SIMD_4xN: returnvalue = "SIMD"; break;
++    case nbnxnk4xN_SIMD_2xNN: returnvalue = "SIMD"; break;
 +#endif
-     "CUDA 8x8x8", "plain C 8x8x8" };
 +#endif
-                 nbnxn_kernel_name[*kernel_type],
++    case nbnxnk8x8x8_CUDA: returnvalue = "CUDA"; break;
++    case nbnxnk8x8x8_PlainC: returnvalue = "plain C"; break;
++
++    case nbnxnkNR:
++    default:
++        gmx_fatal(FARGS, "Illegal kernel type selected");
++        returnvalue = NULL;
++        break;
++    }
++    return returnvalue;
++};
 +
 +static void pick_nbnxn_kernel(FILE *fp,
 +                              const t_commrec *cr,
 +                              const gmx_hw_info_t *hwinfo,
 +                              gmx_bool use_cpu_acceleration,
 +                              gmx_bool *bUseGPU,
 +                              const t_inputrec *ir,
 +                              int *kernel_type,
 +                              int *ewald_excl,
 +                              gmx_bool bDoNonbonded)
 +{
 +    gmx_bool bEmulateGPU, bGPU, bEmulateGPUEnvVarSet;
 +    char gpu_err_str[STRLEN];
 +
 +    assert(kernel_type);
 +
 +    *kernel_type = nbnxnkNotSet;
 +    *ewald_excl  = ewaldexclTable;
 +
 +    bEmulateGPUEnvVarSet = (getenv("GMX_EMULATE_GPU") != NULL);
 +
 +    /* if bUseGPU == NULL we don't want a GPU (e.g. hybrid mode kernel selection) */
 +    bGPU = ((bUseGPU != NULL) && hwinfo->bCanUseGPU);
 +
 +    /* Run GPU emulation mode if GMX_EMULATE_GPU is defined. We will
 +     * automatically switch to emulation if non-bonded calculations are
 +     * turned off via GMX_NO_NONBONDED - this is the simple and elegant
 +     * way to turn off GPU initialization, data movement, and cleanup. */
 +    bEmulateGPU = (bEmulateGPUEnvVarSet || (!bDoNonbonded && bGPU));
 +
 +    /* Enable GPU mode when GPUs are available or GPU emulation is requested.
 +     * The latter is useful to assess the performance one can expect by adding
 +     * GPU(s) to the machine. The conditional below allows this even if mdrun
 +     * is compiled without GPU acceleration support.
 +     * Note that such a GPU acceleration performance assessment should be
 +     * carried out by setting the GMX_EMULATE_GPU and GMX_NO_NONBONDED env. vars
 +     * (and freezing the system as otherwise it would explode). */
 +    if (bGPU || bEmulateGPUEnvVarSet)
 +    {
 +        if (bEmulateGPU)
 +        {
 +            bGPU = FALSE;
 +        }
 +        else
 +        {
 +            /* Each PP node will use the intra-node id-th device from the
 +             * list of detected/selected GPUs. */
 +            if (!init_gpu(cr->rank_pp_intranode, gpu_err_str, &hwinfo->gpu_info))
 +            {
 +                /* At this point the init should never fail as we made sure that
 +                 * we have all the GPUs we need. If it still does, we'll bail. */
 +                gmx_fatal(FARGS, "On node %d failed to initialize GPU #%d: %s",
 +                          cr->nodeid,
 +                          get_gpu_device_id(&hwinfo->gpu_info, cr->rank_pp_intranode),
 +                          gpu_err_str);
 +            }
 +        }
 +        *bUseGPU = bGPU;
 +    }
 +
 +    if (bEmulateGPU)
 +    {
 +        *kernel_type = nbnxnk8x8x8_PlainC;
 +
 +        if (bDoNonbonded)
 +        {
 +            md_print_warn(cr, fp, "Emulating a GPU run on the CPU (slow)");
 +        }
 +    }
 +    else if (bGPU)
 +    {
 +        *kernel_type = nbnxnk8x8x8_CUDA;
 +    }
 +
 +    if (*kernel_type == nbnxnkNotSet)
 +    {
 +        if (use_cpu_acceleration)
 +        {
 +            pick_nbnxn_kernel_cpu(fp,cr,hwinfo->cpuid_info,ir,
 +                                  kernel_type,ewald_excl);
 +        }
 +        else
 +        {
 +            *kernel_type = nbnxnk4x4_PlainC;
 +        }
 +    }
 +
 +    if (bDoNonbonded && fp != NULL)
 +    {
 +        fprintf(fp,"\nUsing %s %dx%d non-bonded kernels\n\n",
-     snew_aligned(ic->tabq_coul_FDV0,ic->tabq_size*4,16);
-     snew_aligned(ic->tabq_coul_F,ic->tabq_size,16);
-     snew_aligned(ic->tabq_coul_V,ic->tabq_size,16);
++                lookup_nbnxn_kernel_name(*kernel_type),
 +                nbnxn_kernel_pairlist_simple(*kernel_type) ? NBNXN_CPU_CLUSTER_I_SIZE : NBNXN_GPU_CLUSTER_SIZE,
 +                nbnxn_kernel_to_cj_size(*kernel_type));
 +    }
 +}
 +
 +gmx_bool uses_simple_tables(int cutoff_scheme,
 +                            nonbonded_verlet_t *nbv,
 +                            int group)
 +{
 +    gmx_bool bUsesSimpleTables = TRUE;
 +    int grp_index;
 +
 +    switch(cutoff_scheme)
 +    {
 +    case ecutsGROUP:
 +        bUsesSimpleTables = TRUE;
 +        break;
 +    case ecutsVERLET:
 +        assert(NULL != nbv && NULL != nbv->grp);
 +        grp_index = (group < 0) ? 0 : (nbv->ngrp - 1);
 +        bUsesSimpleTables = nbnxn_kernel_pairlist_simple(nbv->grp[grp_index].kernel_type);
 +        break;
 +    default:
 +        gmx_incons("unimplemented");
 +    }
 +    return bUsesSimpleTables;
 +}
 +
 +static void init_ewald_f_table(interaction_const_t *ic,
 +                               gmx_bool bUsesSimpleTables,
 +                               real rtab)
 +{
 +    real maxr;
 +
 +    if (bUsesSimpleTables)
 +    {
 +        /* With a spacing of 0.0005 we are at the force summation accuracy
 +         * for the SSE kernels for "normal" atomistic simulations.
 +         */
 +        ic->tabq_scale = ewald_spline3_table_scale(ic->ewaldcoeff,
 +                                                   ic->rcoulomb);
 +        
 +        maxr = (rtab>ic->rcoulomb) ? rtab : ic->rcoulomb;
 +        ic->tabq_size  = (int)(maxr*ic->tabq_scale) + 2;
 +    }
 +    else
 +    {
 +        ic->tabq_size = GPU_EWALD_COULOMB_FORCE_TABLE_SIZE;
 +        /* Subtract 2 iso 1 to avoid access out of range due to rounding */
 +        ic->tabq_scale = (ic->tabq_size - 2)/ic->rcoulomb;
 +    }
 +
 +    sfree_aligned(ic->tabq_coul_FDV0);
 +    sfree_aligned(ic->tabq_coul_F);
 +    sfree_aligned(ic->tabq_coul_V);
 +
 +    /* Create the original table data in FDV0 */
-     snew_aligned(ic->tabq_coul_FDV0,16,16);
-     snew_aligned(ic->tabq_coul_F,16,16);
-     snew_aligned(ic->tabq_coul_V,16,16);
++    snew_aligned(ic->tabq_coul_FDV0,ic->tabq_size*4,32);
++    snew_aligned(ic->tabq_coul_F,ic->tabq_size,32);
++    snew_aligned(ic->tabq_coul_V,ic->tabq_size,32);
 +    table_spline3_fill_ewald_lr(ic->tabq_coul_F,ic->tabq_coul_V,ic->tabq_coul_FDV0,
 +                                ic->tabq_size,1/ic->tabq_scale,ic->ewaldcoeff);
 +}
 +
 +void init_interaction_const_tables(FILE *fp, 
 +                                   interaction_const_t *ic,
 +                                   gmx_bool bUsesSimpleTables,
 +                                   real rtab)
 +{
 +    real spacing;
 +
 +    if (ic->eeltype == eelEWALD || EEL_PME(ic->eeltype))
 +    {
 +        init_ewald_f_table(ic,bUsesSimpleTables,rtab);
 +
 +        if (fp != NULL)
 +        {
 +            fprintf(fp,"Initialized non-bonded Ewald correction tables, spacing: %.2e size: %d\n\n",
 +                    1/ic->tabq_scale,ic->tabq_size);
 +        }
 +    }
 +}
 +
 +void init_interaction_const(FILE *fp, 
 +                            interaction_const_t **interaction_const,
 +                            const t_forcerec *fr,
 +                            real  rtab)
 +{
 +    interaction_const_t *ic;
 +    gmx_bool bUsesSimpleTables = TRUE;
 +
 +    snew(ic, 1);
 +
 +    /* Just allocate something so we can free it */
++    snew_aligned(ic->tabq_coul_FDV0,16,32);
++    snew_aligned(ic->tabq_coul_F,16,32);
++    snew_aligned(ic->tabq_coul_V,16,32);
 +
 +    ic->rlist       = fr->rlist;
 +    ic->rlistlong   = fr->rlistlong;
 +    
 +    /* Lennard-Jones */
 +    ic->rvdw        = fr->rvdw;
 +    if (fr->vdw_modifier==eintmodPOTSHIFT)
 +    {
 +        ic->sh_invrc6 = pow(ic->rvdw,-6.0);
 +    }
 +    else
 +    {
 +        ic->sh_invrc6 = 0;
 +    }
 +
 +    /* Electrostatics */
 +    ic->eeltype     = fr->eeltype;
 +    ic->rcoulomb    = fr->rcoulomb;
 +    ic->epsilon_r   = fr->epsilon_r;
 +    ic->epsfac      = fr->epsfac;
 +
 +    /* Ewald */
 +    ic->ewaldcoeff  = fr->ewaldcoeff;
 +    if (fr->coulomb_modifier==eintmodPOTSHIFT)
 +    {
 +        ic->sh_ewald = gmx_erfc(ic->ewaldcoeff*ic->rcoulomb);
 +    }
 +    else
 +    {
 +        ic->sh_ewald = 0;
 +    }
 +
 +    /* Reaction-field */
 +    if (EEL_RF(ic->eeltype))
 +    {
 +        ic->epsilon_rf = fr->epsilon_rf;
 +        ic->k_rf       = fr->k_rf;
 +        ic->c_rf       = fr->c_rf;
 +    }
 +    else
 +    {
 +        /* For plain cut-off we might use the reaction-field kernels */
 +        ic->epsilon_rf = ic->epsilon_r;
 +        ic->k_rf       = 0;
 +        if (fr->coulomb_modifier==eintmodPOTSHIFT)
 +        {
 +            ic->c_rf   = 1/ic->rcoulomb;
 +        }
 +        else
 +        {
 +            ic->c_rf   = 0;
 +        }
 +    }
 +
 +    if (fp != NULL)
 +    {
 +        fprintf(fp,"Potential shift: LJ r^-12: %.3f r^-6 %.3f",
 +                sqr(ic->sh_invrc6),ic->sh_invrc6);
 +        if (ic->eeltype == eelCUT)
 +        {
 +            fprintf(fp,", Coulomb %.3f",ic->c_rf);
 +        }
 +        else if (EEL_PME(ic->eeltype))
 +        {
 +            fprintf(fp,", Ewald %.3e",ic->sh_ewald);
 +        }
 +        fprintf(fp,"\n");
 +    }
 +
 +    *interaction_const = ic;
 +
 +    if (fr->nbv != NULL && fr->nbv->bUseGPU)
 +    {
 +        nbnxn_cuda_init_const(fr->nbv->cu_nbv, ic, fr->nbv);
 +    }
 +
 +    bUsesSimpleTables = uses_simple_tables(fr->cutoff_scheme, fr->nbv, -1);
 +    init_interaction_const_tables(fp,ic,bUsesSimpleTables,rtab);
 +}
 +
 +static void init_nb_verlet(FILE *fp,
 +                           nonbonded_verlet_t **nb_verlet,
 +                           const t_inputrec *ir,
 +                           const t_forcerec *fr,
 +                           const t_commrec *cr,
 +                           const char *nbpu_opt)
 +{
 +    nonbonded_verlet_t *nbv;
 +    int  i;
 +    char *env;
 +    gmx_bool bHybridGPURun = FALSE;
 +
 +    nbnxn_alloc_t *nb_alloc;
 +    nbnxn_free_t  *nb_free;
 +
 +    snew(nbv, 1);
 +
 +    nbv->nbs = NULL;
 +
 +    nbv->ngrp = (DOMAINDECOMP(cr) ? 2 : 1);
 +    for(i=0; i<nbv->ngrp; i++)
 +    {
 +        nbv->grp[i].nbl_lists.nnbl = 0;
 +        nbv->grp[i].nbat           = NULL;
 +        nbv->grp[i].kernel_type    = nbnxnkNotSet;
 +
 +        if (i == 0) /* local */
 +        {
 +            pick_nbnxn_kernel(fp, cr, fr->hwinfo, fr->use_cpu_acceleration,
 +                              &nbv->bUseGPU,
 +                              ir,
 +                              &nbv->grp[i].kernel_type,
 +                              &nbv->grp[i].ewald_excl,
 +                              fr->bNonbonded);
 +        }
 +        else /* non-local */
 +        {
 +            if (nbpu_opt != NULL && strcmp(nbpu_opt,"gpu_cpu") == 0)
 +            {
 +                /* Use GPU for local, select a CPU kernel for non-local */
 +                pick_nbnxn_kernel(fp, cr, fr->hwinfo, fr->use_cpu_acceleration,
 +                                  NULL,
 +                                  ir,
 +                                  &nbv->grp[i].kernel_type,
 +                                  &nbv->grp[i].ewald_excl,
 +                                  fr->bNonbonded);
 +
 +                bHybridGPURun = TRUE;
 +            }
 +            else
 +            {
 +                /* Use the same kernel for local and non-local interactions */
 +                nbv->grp[i].kernel_type = nbv->grp[0].kernel_type;
 +                nbv->grp[i].ewald_excl  = nbv->grp[0].ewald_excl;
 +            }
 +        }
 +    }
 +
 +    if (nbv->bUseGPU)
 +    {
 +        /* init the NxN GPU data; the last argument tells whether we'll have
 +         * both local and non-local NB calculation on GPU */
 +        nbnxn_cuda_init(fp, &nbv->cu_nbv,
 +                        &fr->hwinfo->gpu_info, cr->rank_pp_intranode,
 +                        (nbv->ngrp > 1) && !bHybridGPURun);
 +
 +        if ((env = getenv("GMX_NB_MIN_CI")) != NULL)
 +        {
 +            char *end;
 +
 +            nbv->min_ci_balanced = strtol(env, &end, 10);
 +            if (!end || (*end != 0) || nbv->min_ci_balanced <= 0)
 +            {
 +                gmx_fatal(FARGS, "Invalid value passed in GMX_NB_MIN_CI=%s, positive integer required", env);
 +            }
 +
 +            if (debug)
 +            {
 +                fprintf(debug, "Neighbor-list balancing parameter: %d (passed as env. var.)\n", 
 +                        nbv->min_ci_balanced);
 +            }
 +        }
 +        else
 +        {
 +            nbv->min_ci_balanced = nbnxn_cuda_min_ci_balanced(nbv->cu_nbv);
 +            if (debug)
 +            {
 +                fprintf(debug, "Neighbor-list balancing parameter: %d (auto-adjusted to the number of GPU multi-processors)\n",
 +                        nbv->min_ci_balanced);
 +            }
 +        }
 +    }
 +    else
 +    {
 +        nbv->min_ci_balanced = 0;
 +    }
 +
 +    *nb_verlet = nbv;
 +
 +    nbnxn_init_search(&nbv->nbs,
 +                      DOMAINDECOMP(cr) ? & cr->dd->nc : NULL,
 +                      DOMAINDECOMP(cr) ? domdec_zones(cr->dd) : NULL,
 +                      gmx_omp_nthreads_get(emntNonbonded));
 +
 +    for(i=0; i<nbv->ngrp; i++)
 +    {
 +        if (nbv->grp[0].kernel_type == nbnxnk8x8x8_CUDA)
 +        {
 +            nb_alloc = &pmalloc;
 +            nb_free  = &pfree;
 +        }
 +        else
 +        {
 +            nb_alloc = NULL;
 +            nb_free  = NULL;
 +        }
 +
 +        nbnxn_init_pairlist_set(&nbv->grp[i].nbl_lists,
 +                                nbnxn_kernel_pairlist_simple(nbv->grp[i].kernel_type),
 +                                /* 8x8x8 "non-simple" lists are ATM always combined */
 +                                !nbnxn_kernel_pairlist_simple(nbv->grp[i].kernel_type),
 +                                nb_alloc, nb_free);
 +
 +        if (i == 0 ||
 +            nbv->grp[0].kernel_type != nbv->grp[i].kernel_type)
 +        {
 +            snew(nbv->grp[i].nbat,1);
 +            nbnxn_atomdata_init(fp,
 +                                nbv->grp[i].nbat,
 +                                nbv->grp[i].kernel_type,
 +                                fr->ntype,fr->nbfp,
 +                                ir->opts.ngener,
 +                                nbnxn_kernel_pairlist_simple(nbv->grp[i].kernel_type) ? gmx_omp_nthreads_get(emntNonbonded) : 1,
 +                                nb_alloc, nb_free);
 +        }
 +        else
 +        {
 +            nbv->grp[i].nbat = nbv->grp[0].nbat;
 +        }
 +    }
 +}
 +
 +void init_forcerec(FILE *fp,
 +                   const output_env_t oenv,
 +                   t_forcerec *fr,
 +                   t_fcdata   *fcd,
 +                   const t_inputrec *ir,
 +                   const gmx_mtop_t *mtop,
 +                   const t_commrec  *cr,
 +                   matrix     box,
 +                   gmx_bool       bMolEpot,
 +                   const char *tabfn,
 +                   const char *tabafn,
 +                   const char *tabpfn,
 +                   const char *tabbfn,
 +                   const char *nbpu_opt,
 +                   gmx_bool   bNoSolvOpt,
 +                   real       print_force)
 +{
 +    int     i,j,m,natoms,ngrp,negp_pp,negptable,egi,egj;
 +    real    rtab;
 +    char    *env;
 +    double  dbl;
 +    rvec    box_size;
 +    const t_block *cgs;
 +    gmx_bool    bGenericKernelOnly;
 +    gmx_bool    bTab,bSep14tab,bNormalnblists;
 +    t_nblists *nbl;
 +    int     *nm_ind,egp_flags;
 +    
 +    /* By default we turn acceleration on, but it might be turned off further down... */
 +    fr->use_cpu_acceleration = TRUE;
 +
 +    fr->bDomDec = DOMAINDECOMP(cr);
 +
 +    natoms = mtop->natoms;
 +
 +    if (check_box(ir->ePBC,box))
 +    {
 +        gmx_fatal(FARGS,check_box(ir->ePBC,box));
 +    }
 +    
 +    /* Test particle insertion ? */
 +    if (EI_TPI(ir->eI)) {
 +        /* Set to the size of the molecule to be inserted (the last one) */
 +        /* Because of old style topologies, we have to use the last cg
 +         * instead of the last molecule type.
 +         */
 +        cgs = &mtop->moltype[mtop->molblock[mtop->nmolblock-1].type].cgs;
 +        fr->n_tpi = cgs->index[cgs->nr] - cgs->index[cgs->nr-1];
 +        if (fr->n_tpi != mtop->mols.index[mtop->mols.nr] - mtop->mols.index[mtop->mols.nr-1]) {
 +            gmx_fatal(FARGS,"The molecule to insert can not consist of multiple charge groups.\nMake it a single charge group.");
 +        }
 +    } else {
 +        fr->n_tpi = 0;
 +    }
 +    
 +    /* Copy AdResS parameters */
 +    if (ir->bAdress) {
 +      fr->adress_type     = ir->adress->type;
 +      fr->adress_const_wf = ir->adress->const_wf;
 +      fr->adress_ex_width = ir->adress->ex_width;
 +      fr->adress_hy_width = ir->adress->hy_width;
 +      fr->adress_icor     = ir->adress->icor;
 +      fr->adress_site     = ir->adress->site;
 +      fr->adress_ex_forcecap = ir->adress->ex_forcecap;
 +      fr->adress_do_hybridpairs = ir->adress->do_hybridpairs;
 +
 +
 +      snew(fr->adress_group_explicit , ir->adress->n_energy_grps);
 +      for (i=0; i< ir->adress->n_energy_grps; i++){
 +          fr->adress_group_explicit[i]= ir->adress->group_explicit[i];
 +      }
 +
 +      fr->n_adress_tf_grps = ir->adress->n_tf_grps;
 +      snew(fr->adress_tf_table_index, fr->n_adress_tf_grps);
 +      for (i=0; i< fr->n_adress_tf_grps; i++){
 +          fr->adress_tf_table_index[i]= ir->adress->tf_table_index[i];
 +      }
 +      copy_rvec(ir->adress->refs,fr->adress_refs);
 +    } else {
 +      fr->adress_type = eAdressOff;
 +      fr->adress_do_hybridpairs = FALSE;
 +    }
 +    
 +    /* Copy the user determined parameters */
 +    fr->userint1 = ir->userint1;
 +    fr->userint2 = ir->userint2;
 +    fr->userint3 = ir->userint3;
 +    fr->userint4 = ir->userint4;
 +    fr->userreal1 = ir->userreal1;
 +    fr->userreal2 = ir->userreal2;
 +    fr->userreal3 = ir->userreal3;
 +    fr->userreal4 = ir->userreal4;
 +    
 +    /* Shell stuff */
 +    fr->fc_stepsize = ir->fc_stepsize;
 +    
 +    /* Free energy */
 +    fr->efep       = ir->efep;
 +    fr->sc_alphavdw = ir->fepvals->sc_alpha;
 +    if (ir->fepvals->bScCoul)
 +    {
 +        fr->sc_alphacoul = ir->fepvals->sc_alpha;
 +        fr->sc_sigma6_min = pow(ir->fepvals->sc_sigma_min,6);
 +    }
 +    else
 +    {
 +        fr->sc_alphacoul = 0;
 +        fr->sc_sigma6_min = 0; /* only needed when bScCoul is on */
 +    }
 +    fr->sc_power   = ir->fepvals->sc_power;
 +    fr->sc_r_power   = ir->fepvals->sc_r_power;
 +    fr->sc_sigma6_def = pow(ir->fepvals->sc_sigma,6);
 +
 +    env = getenv("GMX_SCSIGMA_MIN");
 +    if (env != NULL)
 +    {
 +        dbl = 0;
 +        sscanf(env,"%lf",&dbl);
 +        fr->sc_sigma6_min = pow(dbl,6);
 +        if (fp)
 +        {
 +            fprintf(fp,"Setting the minimum soft core sigma to %g nm\n",dbl);
 +        }
 +    }
 +
 +    fr->bNonbonded = TRUE;
 +    if (getenv("GMX_NO_NONBONDED") != NULL)
 +    {
 +        /* turn off non-bonded calculations */
 +        fr->bNonbonded = FALSE;
 +        md_print_warn(cr,fp,
 +                      "Found environment variable GMX_NO_NONBONDED.\n"
 +                      "Disabling nonbonded calculations.\n");
 +    }
 +
 +    bGenericKernelOnly = FALSE;
 +
 +    /* We now check in the NS code whether a particular combination of interactions
 +     * can be used with water optimization, and disable it if that is not the case.
 +     */
 +
 +    if (getenv("GMX_NB_GENERIC") != NULL)
 +    {
 +        if (fp != NULL)
 +        {
 +            fprintf(fp,
 +                    "Found environment variable GMX_NB_GENERIC.\n"
 +                    "Disabling all interaction-specific nonbonded kernels, will only\n"
 +                    "use the slow generic ones in src/gmxlib/nonbonded/nb_generic.c\n\n");
 +        }
 +        bGenericKernelOnly = TRUE;
 +    }
 +
 +    if (bGenericKernelOnly==TRUE)
 +    {
 +        bNoSolvOpt         = TRUE;
 +    }
 +
 +    if( (getenv("GMX_DISABLE_CPU_ACCELERATION") != NULL) || (getenv("GMX_NOOPTIMIZEDKERNELS") != NULL) )
 +    {
 +        fr->use_cpu_acceleration = FALSE;
 +        if (fp != NULL)
 +        {
 +            fprintf(fp,
 +                    "\nFound environment variable GMX_DISABLE_CPU_ACCELERATION.\n"
 +                    "Disabling all CPU architecture-specific (e.g. SSE2/SSE4/AVX) routines.\n\n");
 +        }
 +    }
 +
 +    fr->bBHAM = (mtop->ffparams.functype[0] == F_BHAM);
 +
 +    /* Check if we can/should do all-vs-all kernels */
 +    fr->bAllvsAll       = can_use_allvsall(ir,mtop,FALSE,NULL,NULL);
 +    fr->AllvsAll_work   = NULL;
 +    fr->AllvsAll_workgb = NULL;
 +
 +
 +    /* Neighbour searching stuff */
 +    fr->cutoff_scheme = ir->cutoff_scheme;
 +    fr->bGrid         = (ir->ns_type == ensGRID);
 +    fr->ePBC          = ir->ePBC;
 +
 +    /* Determine if we will do PBC for distances in bonded interactions */
 +    if (fr->ePBC == epbcNONE)
 +    {
 +        fr->bMolPBC = FALSE;
 +    }
 +    else
 +    {
 +        if (!DOMAINDECOMP(cr))
 +        {
 +            /* The group cut-off scheme and SHAKE assume charge groups
 +             * are whole, but not using molpbc is faster in most cases.
 +             */
 +            if (fr->cutoff_scheme == ecutsGROUP ||
 +                (ir->eConstrAlg == econtSHAKE &&
 +                 (gmx_mtop_ftype_count(mtop,F_CONSTR) > 0 ||
 +                  gmx_mtop_ftype_count(mtop,F_CONSTRNC) > 0)))
 +            {
 +                fr->bMolPBC = ir->bPeriodicMols;
 +            }
 +            else
 +            {
 +                fr->bMolPBC = TRUE;
 +                if (getenv("GMX_USE_GRAPH") != NULL)
 +                {
 +                    fr->bMolPBC = FALSE;
 +                    if (fp)
 +                    {
 +                        fprintf(fp,"\nGMX_MOLPBC is set, using the graph for bonded interactions\n\n");
 +                    }
 +                }
 +            }
 +        }
 +        else
 +        {
 +            fr->bMolPBC = dd_bonded_molpbc(cr->dd,fr->ePBC);
 +        }
 +    }
 +
 +    fr->rc_scaling = ir->refcoord_scaling;
 +    copy_rvec(ir->posres_com,fr->posres_com);
 +    copy_rvec(ir->posres_comB,fr->posres_comB);
 +    fr->rlist      = cutoff_inf(ir->rlist);
 +    fr->rlistlong  = cutoff_inf(ir->rlistlong);
 +    fr->eeltype    = ir->coulombtype;
 +    fr->vdwtype    = ir->vdwtype;
 +
 +    fr->coulomb_modifier = ir->coulomb_modifier;
 +    fr->vdw_modifier     = ir->vdw_modifier;
 +
 +    /* Electrostatics: Translate from interaction-setting-in-mdp-file to kernel interaction format */
 +    switch(fr->eeltype)
 +    {
 +        case eelCUT:
 +            fr->nbkernel_elec_interaction = GMX_NBKERNEL_ELEC_COULOMB;
 +            break;
 +
 +        case eelRF:
 +        case eelGRF:
 +        case eelRF_NEC:
 +            fr->nbkernel_elec_interaction = GMX_NBKERNEL_ELEC_REACTIONFIELD;
 +            break;
 +
 +        case eelRF_ZERO:
 +            fr->nbkernel_elec_interaction = GMX_NBKERNEL_ELEC_REACTIONFIELD;
 +            fr->coulomb_modifier          = eintmodEXACTCUTOFF;
 +            break;
 +
 +        case eelSWITCH:
 +        case eelSHIFT:
 +        case eelUSER:
 +        case eelENCADSHIFT:
 +        case eelPMESWITCH:
 +        case eelPMEUSER:
 +        case eelPMEUSERSWITCH:
 +            fr->nbkernel_elec_interaction = GMX_NBKERNEL_ELEC_CUBICSPLINETABLE;
 +            break;
 +
 +        case eelPME:
 +        case eelEWALD:
 +            fr->nbkernel_elec_interaction = GMX_NBKERNEL_ELEC_EWALD;
 +            break;
 +
 +        default:
 +            gmx_fatal(FARGS,"Unsupported electrostatic interaction: %s",eel_names[fr->eeltype]);
 +            break;
 +    }
 +
 +    /* Vdw: Translate from mdp settings to kernel format */
 +    switch(fr->vdwtype)
 +    {
 +        case evdwCUT:
 +            if(fr->bBHAM)
 +            {
 +                fr->nbkernel_vdw_interaction = GMX_NBKERNEL_VDW_BUCKINGHAM;
 +            }
 +            else
 +            {
 +                fr->nbkernel_vdw_interaction = GMX_NBKERNEL_VDW_LENNARDJONES;
 +            }
 +            break;
 +
 +        case evdwSWITCH:
 +        case evdwSHIFT:
 +        case evdwUSER:
 +        case evdwENCADSHIFT:
 +            fr->nbkernel_vdw_interaction = GMX_NBKERNEL_VDW_CUBICSPLINETABLE;
 +            break;
 +
 +        default:
 +            gmx_fatal(FARGS,"Unsupported vdw interaction: %s",evdw_names[fr->vdwtype]);
 +            break;
 +    }
 +
 +    /* These start out identical to ir, but might be altered if we e.g. tabulate the interaction in the kernel */
 +    fr->nbkernel_elec_modifier    = fr->coulomb_modifier;
 +    fr->nbkernel_vdw_modifier     = fr->vdw_modifier;
 +
 +    fr->bTwinRange = fr->rlistlong > fr->rlist;
 +    fr->bEwald     = (EEL_PME(fr->eeltype) || fr->eeltype==eelEWALD);
 +    
 +    fr->reppow     = mtop->ffparams.reppow;
 +
 +    if (ir->cutoff_scheme == ecutsGROUP)
 +    {
 +        fr->bvdwtab    = (fr->vdwtype != evdwCUT ||
 +                          !gmx_within_tol(fr->reppow,12.0,10*GMX_DOUBLE_EPS));
 +        /* We have special kernels for standard Ewald and PME, but the pme-switch ones are tabulated above */
 +        fr->bcoultab   = !(fr->eeltype == eelCUT ||
 +                           fr->eeltype == eelEWALD ||
 +                           fr->eeltype == eelPME ||
 +                           fr->eeltype == eelRF ||
 +                           fr->eeltype == eelRF_ZERO);
 +
 +        /* If the user absolutely wants different switch/shift settings for coul/vdw, it is likely
 +         * going to be faster to tabulate the interaction than calling the generic kernel.
 +         */
 +        if(fr->nbkernel_elec_modifier==eintmodPOTSWITCH && fr->nbkernel_vdw_modifier==eintmodPOTSWITCH)
 +        {
 +            if((fr->rcoulomb_switch != fr->rvdw_switch) || (fr->rcoulomb != fr->rvdw))
 +            {
 +                fr->bcoultab = TRUE;
 +            }
 +        }
 +        else if((fr->nbkernel_elec_modifier==eintmodPOTSHIFT && fr->nbkernel_vdw_modifier==eintmodPOTSHIFT) ||
 +                ((fr->nbkernel_elec_interaction == GMX_NBKERNEL_ELEC_REACTIONFIELD &&
 +                  fr->nbkernel_elec_modifier==eintmodEXACTCUTOFF &&
 +                  (fr->nbkernel_vdw_modifier==eintmodPOTSWITCH || fr->nbkernel_vdw_modifier==eintmodPOTSHIFT))))
 +        {
 +            if(fr->rcoulomb != fr->rvdw)
 +            {
 +                fr->bcoultab = TRUE;
 +            }
 +        }
 +
 +        if (getenv("GMX_REQUIRE_TABLES"))
 +        {
 +            fr->bvdwtab  = TRUE;
 +            fr->bcoultab = TRUE;
 +        }
 +
 +        if (fp)
 +        {
 +            fprintf(fp,"Table routines are used for coulomb: %s\n",bool_names[fr->bcoultab]);
 +            fprintf(fp,"Table routines are used for vdw:     %s\n",bool_names[fr->bvdwtab ]);
 +        }
 +
 +        if(fr->bvdwtab==TRUE)
 +        {
 +            fr->nbkernel_vdw_interaction = GMX_NBKERNEL_VDW_CUBICSPLINETABLE;
 +            fr->nbkernel_vdw_modifier    = eintmodNONE;
 +        }
 +        if(fr->bcoultab==TRUE)
 +        {
 +            fr->nbkernel_elec_interaction = GMX_NBKERNEL_ELEC_CUBICSPLINETABLE;
 +            fr->nbkernel_elec_modifier    = eintmodNONE;
 +        }
 +    }
 +
 +    if (ir->cutoff_scheme == ecutsVERLET)
 +    {
 +        if (!gmx_within_tol(fr->reppow,12.0,10*GMX_DOUBLE_EPS))
 +        {
 +            gmx_fatal(FARGS,"Cut-off scheme %S only supports LJ repulsion power 12",ecutscheme_names[ir->cutoff_scheme]);
 +        }
 +        fr->bvdwtab  = FALSE;
 +        fr->bcoultab = FALSE;
 +    }
 +    
 +    /* Tables are used for direct ewald sum */
 +    if(fr->bEwald)
 +    {
 +        if (EEL_PME(ir->coulombtype))
 +        {
 +            if (fp)
 +                fprintf(fp,"Will do PME sum in reciprocal space.\n");
 +            if (ir->coulombtype == eelP3M_AD)
 +            {
 +                please_cite(fp,"Hockney1988");
 +                please_cite(fp,"Ballenegger2012");
 +            }
 +            else
 +            {
 +                please_cite(fp,"Essmann95a");
 +            }
 +            
 +            if (ir->ewald_geometry == eewg3DC)
 +            {
 +                if (fp)
 +                {
 +                    fprintf(fp,"Using the Ewald3DC correction for systems with a slab geometry.\n");
 +                }
 +                please_cite(fp,"In-Chul99a");
 +            }
 +        }
 +        fr->ewaldcoeff=calc_ewaldcoeff(ir->rcoulomb, ir->ewald_rtol);
 +        init_ewald_tab(&(fr->ewald_table), cr, ir, fp);
 +        if (fp)
 +        {
 +            fprintf(fp,"Using a Gaussian width (1/beta) of %g nm for Ewald\n",
 +                    1/fr->ewaldcoeff);
 +        }
 +    }
 +    
 +    /* Electrostatics */
 +    fr->epsilon_r  = ir->epsilon_r;
 +    fr->epsilon_rf = ir->epsilon_rf;
 +    fr->fudgeQQ    = mtop->ffparams.fudgeQQ;
 +    fr->rcoulomb_switch = ir->rcoulomb_switch;
 +    fr->rcoulomb        = cutoff_inf(ir->rcoulomb);
 +    
 +    /* Parameters for generalized RF */
 +    fr->zsquare = 0.0;
 +    fr->temp    = 0.0;
 +    
 +    if (fr->eeltype == eelGRF)
 +    {
 +        init_generalized_rf(fp,mtop,ir,fr);
 +    }
 +    else if (fr->eeltype == eelSHIFT)
 +    {
 +        for(m=0; (m<DIM); m++)
 +            box_size[m]=box[m][m];
 +        
 +        if ((fr->eeltype == eelSHIFT && fr->rcoulomb > fr->rcoulomb_switch))
 +            set_shift_consts(fp,fr->rcoulomb_switch,fr->rcoulomb,box_size,fr);
 +    }
 +    
 +    fr->bF_NoVirSum = (EEL_FULL(fr->eeltype) ||
 +                       gmx_mtop_ftype_count(mtop,F_POSRES) > 0 ||
 +                       gmx_mtop_ftype_count(mtop,F_FBPOSRES) > 0 ||
 +                       IR_ELEC_FIELD(*ir) ||
 +                       (fr->adress_icor != eAdressICOff)
 +                      );
 +    
 +    if (fr->cutoff_scheme == ecutsGROUP &&
 +        ncg_mtop(mtop) > fr->cg_nalloc && !DOMAINDECOMP(cr)) {
 +        /* Count the total number of charge groups */
 +        fr->cg_nalloc = ncg_mtop(mtop);
 +        srenew(fr->cg_cm,fr->cg_nalloc);
 +    }
 +    if (fr->shift_vec == NULL)
 +        snew(fr->shift_vec,SHIFTS);
 +    
 +    if (fr->fshift == NULL)
 +        snew(fr->fshift,SHIFTS);
 +    
 +    if (fr->nbfp == NULL) {
 +        fr->ntype = mtop->ffparams.atnr;
 +        fr->nbfp  = mk_nbfp(&mtop->ffparams,fr->bBHAM);
 +    }
 +    
 +    /* Copy the energy group exclusions */
 +    fr->egp_flags = ir->opts.egp_flags;
 +    
 +    /* Van der Waals stuff */
 +    fr->rvdw        = cutoff_inf(ir->rvdw);
 +    fr->rvdw_switch = ir->rvdw_switch;
 +    if ((fr->vdwtype != evdwCUT) && (fr->vdwtype != evdwUSER) && !fr->bBHAM) {
 +        if (fr->rvdw_switch >= fr->rvdw)
 +            gmx_fatal(FARGS,"rvdw_switch (%f) must be < rvdw (%f)",
 +                      fr->rvdw_switch,fr->rvdw);
 +        if (fp)
 +            fprintf(fp,"Using %s Lennard-Jones, switch between %g and %g nm\n",
 +                    (fr->eeltype==eelSWITCH) ? "switched":"shifted",
 +                    fr->rvdw_switch,fr->rvdw);
 +    } 
 +    
 +    if (fr->bBHAM && (fr->vdwtype == evdwSHIFT || fr->vdwtype == evdwSWITCH))
 +        gmx_fatal(FARGS,"Switch/shift interaction not supported with Buckingham");
 +    
 +    if (fp)
 +        fprintf(fp,"Cut-off's:   NS: %g   Coulomb: %g   %s: %g\n",
 +                fr->rlist,fr->rcoulomb,fr->bBHAM ? "BHAM":"LJ",fr->rvdw);
 +    
 +    fr->eDispCorr = ir->eDispCorr;
 +    if (ir->eDispCorr != edispcNO)
 +    {
 +        set_avcsixtwelve(fp,fr,mtop);
 +    }
 +    
 +    if (fr->bBHAM)
 +    {
 +        set_bham_b_max(fp,fr,mtop);
 +    }
 +
 +    fr->bGB = (ir->implicit_solvent == eisGBSA);
 +      fr->gb_epsilon_solvent = ir->gb_epsilon_solvent;
 +
 +    /* Copy the GBSA data (radius, volume and surftens for each
 +     * atomtype) from the topology atomtype section to forcerec.
 +     */
 +    snew(fr->atype_radius,fr->ntype);
 +    snew(fr->atype_vol,fr->ntype);
 +    snew(fr->atype_surftens,fr->ntype);
 +    snew(fr->atype_gb_radius,fr->ntype);
 +    snew(fr->atype_S_hct,fr->ntype);
 +
 +    if (mtop->atomtypes.nr > 0)
 +    {
 +        for(i=0;i<fr->ntype;i++)
 +            fr->atype_radius[i] =mtop->atomtypes.radius[i];
 +        for(i=0;i<fr->ntype;i++)
 +            fr->atype_vol[i] = mtop->atomtypes.vol[i];
 +        for(i=0;i<fr->ntype;i++)
 +            fr->atype_surftens[i] = mtop->atomtypes.surftens[i];
 +        for(i=0;i<fr->ntype;i++)
 +            fr->atype_gb_radius[i] = mtop->atomtypes.gb_radius[i];
 +        for(i=0;i<fr->ntype;i++)
 +            fr->atype_S_hct[i] = mtop->atomtypes.S_hct[i];
 +    }  
 +      
 +      /* Generate the GB table if needed */
 +      if(fr->bGB)
 +      {
 +#ifdef GMX_DOUBLE
 +              fr->gbtabscale=2000;
 +#else
 +              fr->gbtabscale=500;
 +#endif
 +              
 +              fr->gbtabr=100;
 +              fr->gbtab=make_gb_table(fp,oenv,fr,tabpfn,fr->gbtabscale);
 +
 +        init_gb(&fr->born,cr,fr,ir,mtop,ir->rgbradii,ir->gb_algorithm);
 +
 +        /* Copy local gb data (for dd, this is done in dd_partition_system) */
 +        if (!DOMAINDECOMP(cr))
 +        {
 +            make_local_gb(cr,fr->born,ir->gb_algorithm);
 +        }
 +    }
 +
 +    /* Set the charge scaling */
 +    if (fr->epsilon_r != 0)
 +        fr->epsfac = ONE_4PI_EPS0/fr->epsilon_r;
 +    else
 +        /* eps = 0 is infinite dieletric: no coulomb interactions */
 +        fr->epsfac = 0;
 +    
 +    /* Reaction field constants */
 +    if (EEL_RF(fr->eeltype))
 +        calc_rffac(fp,fr->eeltype,fr->epsilon_r,fr->epsilon_rf,
 +                   fr->rcoulomb,fr->temp,fr->zsquare,box,
 +                   &fr->kappa,&fr->k_rf,&fr->c_rf);
 +    
 +    set_chargesum(fp,fr,mtop);
 +    
 +    /* if we are using LR electrostatics, and they are tabulated,
 +     * the tables will contain modified coulomb interactions.
 +     * Since we want to use the non-shifted ones for 1-4
 +     * coulombic interactions, we must have an extra set of tables.
 +     */
 +    
 +    /* Construct tables.
 +     * A little unnecessary to make both vdw and coul tables sometimes,
 +     * but what the heck... */
 +    
 +    bTab = fr->bcoultab || fr->bvdwtab || fr->bEwald;
 +
 +    bSep14tab = ((!bTab || fr->eeltype!=eelCUT || fr->vdwtype!=evdwCUT ||
 +                  fr->bBHAM || fr->bEwald) &&
 +                 (gmx_mtop_ftype_count(mtop,F_LJ14) > 0 ||
 +                  gmx_mtop_ftype_count(mtop,F_LJC14_Q) > 0 ||
 +                  gmx_mtop_ftype_count(mtop,F_LJC_PAIRS_NB) > 0));
 +
 +    negp_pp = ir->opts.ngener - ir->nwall;
 +    negptable = 0;
 +    if (!bTab) {
 +        bNormalnblists = TRUE;
 +        fr->nnblists = 1;
 +    } else {
 +        bNormalnblists = (ir->eDispCorr != edispcNO);
 +        for(egi=0; egi<negp_pp; egi++) {
 +            for(egj=egi;  egj<negp_pp; egj++) {
 +                egp_flags = ir->opts.egp_flags[GID(egi,egj,ir->opts.ngener)];
 +                if (!(egp_flags & EGP_EXCL)) {
 +                    if (egp_flags & EGP_TABLE) {
 +                        negptable++;
 +                    } else {
 +                        bNormalnblists = TRUE;
 +                    }
 +                }
 +            }
 +        }
 +        if (bNormalnblists) {
 +            fr->nnblists = negptable + 1;
 +        } else {
 +            fr->nnblists = negptable;
 +        }
 +        if (fr->nnblists > 1)
 +            snew(fr->gid2nblists,ir->opts.ngener*ir->opts.ngener);
 +    }
 +
 +    if (ir->adress){
 +        fr->nnblists*=2;
 +    }
 +
 +    snew(fr->nblists,fr->nnblists);
 +    
 +    /* This code automatically gives table length tabext without cut-off's,
 +     * in that case grompp should already have checked that we do not need
 +     * normal tables and we only generate tables for 1-4 interactions.
 +     */
 +    rtab = ir->rlistlong + ir->tabext;
 +
 +    if (bTab) {
 +        /* make tables for ordinary interactions */
 +        if (bNormalnblists) {
 +            make_nbf_tables(fp,oenv,fr,rtab,cr,tabfn,NULL,NULL,&fr->nblists[0]);
 +            if (ir->adress){
 +                make_nbf_tables(fp,oenv,fr,rtab,cr,tabfn,NULL,NULL,&fr->nblists[fr->nnblists/2]);
 +            }
 +            if (!bSep14tab)
 +                fr->tab14 = fr->nblists[0].table_elec_vdw;
 +            m = 1;
 +        } else {
 +            m = 0;
 +        }
 +        if (negptable > 0) {
 +            /* Read the special tables for certain energy group pairs */
 +            nm_ind = mtop->groups.grps[egcENER].nm_ind;
 +            for(egi=0; egi<negp_pp; egi++) {
 +                for(egj=egi;  egj<negp_pp; egj++) {
 +                    egp_flags = ir->opts.egp_flags[GID(egi,egj,ir->opts.ngener)];
 +                    if ((egp_flags & EGP_TABLE) && !(egp_flags & EGP_EXCL)) {
 +                        nbl = &(fr->nblists[m]);
 +                        if (fr->nnblists > 1) {
 +                            fr->gid2nblists[GID(egi,egj,ir->opts.ngener)] = m;
 +                        }
 +                        /* Read the table file with the two energy groups names appended */
 +                        make_nbf_tables(fp,oenv,fr,rtab,cr,tabfn,
 +                                        *mtop->groups.grpname[nm_ind[egi]],
 +                                        *mtop->groups.grpname[nm_ind[egj]],
 +                                        &fr->nblists[m]);
 +                        if (ir->adress){
 +                             make_nbf_tables(fp,oenv,fr,rtab,cr,tabfn,
 +                                        *mtop->groups.grpname[nm_ind[egi]],
 +                                        *mtop->groups.grpname[nm_ind[egj]],
 +                                        &fr->nblists[fr->nnblists/2+m]);
 +                        }
 +                        m++;
 +                    } else if (fr->nnblists > 1) {
 +                        fr->gid2nblists[GID(egi,egj,ir->opts.ngener)] = 0;
 +                    }
 +                }
 +            }
 +        }
 +    }
 +    if (bSep14tab)
 +    {
 +        /* generate extra tables with plain Coulomb for 1-4 interactions only */
 +        fr->tab14 = make_tables(fp,oenv,fr,MASTER(cr),tabpfn,rtab,
 +                                GMX_MAKETABLES_14ONLY);
 +    }
 +
 +    /* Read AdResS Thermo Force table if needed */
 +    if(fr->adress_icor == eAdressICThermoForce)
 +    {
 +        /* old todo replace */ 
 +        
 +        if (ir->adress->n_tf_grps > 0){
 +            make_adress_tf_tables(fp,oenv,fr,ir,tabfn, mtop, box);
 +
 +        }else{
 +            /* load the default table */
 +            snew(fr->atf_tabs, 1);
 +            fr->atf_tabs[DEFAULT_TF_TABLE] = make_atf_table(fp,oenv,fr,tabafn, box);
 +        }
 +    }
 +    
 +    /* Wall stuff */
 +    fr->nwall = ir->nwall;
 +    if (ir->nwall && ir->wall_type==ewtTABLE)
 +    {
 +        make_wall_tables(fp,oenv,ir,tabfn,&mtop->groups,fr);
 +    }
 +    
 +    if (fcd && tabbfn) {
 +        fcd->bondtab  = make_bonded_tables(fp,
 +                                           F_TABBONDS,F_TABBONDSNC,
 +                                           mtop,tabbfn,"b");
 +        fcd->angletab = make_bonded_tables(fp,
 +                                           F_TABANGLES,-1,
 +                                           mtop,tabbfn,"a");
 +        fcd->dihtab   = make_bonded_tables(fp,
 +                                           F_TABDIHS,-1,
 +                                           mtop,tabbfn,"d");
 +    } else {
 +        if (debug)
 +            fprintf(debug,"No fcdata or table file name passed, can not read table, can not do bonded interactions\n");
 +    }
 +    
 +    /* QM/MM initialization if requested
 +     */
 +    if (ir->bQMMM)
 +    {
 +        fprintf(stderr,"QM/MM calculation requested.\n");
 +    }
 +    
 +    fr->bQMMM      = ir->bQMMM;   
 +    fr->qr         = mk_QMMMrec();
 +    
 +    /* Set all the static charge group info */
 +    fr->cginfo_mb = init_cginfo_mb(fp,mtop,fr,bNoSolvOpt,
 +                                   &fr->bExcl_IntraCGAll_InterCGNone);
 +    if (DOMAINDECOMP(cr)) {
 +        fr->cginfo = NULL;
 +    } else {
 +        fr->cginfo = cginfo_expand(mtop->nmolblock,fr->cginfo_mb);
 +    }
 +    
 +    if (!DOMAINDECOMP(cr))
 +    {
 +        /* When using particle decomposition, the effect of the second argument,
 +         * which sets fr->hcg, is corrected later in do_md and init_em.
 +         */
 +        forcerec_set_ranges(fr,ncg_mtop(mtop),ncg_mtop(mtop),
 +                            mtop->natoms,mtop->natoms,mtop->natoms);
 +    }
 +    
 +    fr->print_force = print_force;
 +
 +
 +    /* coarse load balancing vars */
 +    fr->t_fnbf=0.;
 +    fr->t_wait=0.;
 +    fr->timesteps=0;
 +    
 +    /* Initialize neighbor search */
 +    init_ns(fp,cr,&fr->ns,fr,mtop,box);
 +
 +    if (cr->duty & DUTY_PP)
 +    {
 +        gmx_nonbonded_setup(fp,fr,bGenericKernelOnly);
 +    /*
 +     if (ir->bAdress)
 +        {
 +            gmx_setup_adress_kernels(fp,bGenericKernelOnly);
 +        }
 +     */
 +    }
 +
 +    /* Initialize the thread working data for bonded interactions */
 +    init_forcerec_f_threads(fr,mtop->groups.grps[egcENER].nr);
 +    
 +    snew(fr->excl_load,fr->nthreads+1);
 +
 +    if (fr->cutoff_scheme == ecutsVERLET)
 +    {
 +        if (ir->rcoulomb != ir->rvdw)
 +        {
 +            gmx_fatal(FARGS,"With Verlet lists rcoulomb and rvdw should be identical");
 +        }
 +
 +        init_nb_verlet(fp, &fr->nbv, ir, fr, cr, nbpu_opt);
 +    }
 +
 +    /* fr->ic is used both by verlet and group kernels (to some extent) now */
 +    init_interaction_const(fp, &fr->ic, fr, rtab);
 +    if (ir->eDispCorr != edispcNO)
 +    {
 +        calc_enervirdiff(fp,ir->eDispCorr,fr);
 +    }
 +}
 +
 +#define pr_real(fp,r) fprintf(fp,"%s: %e\n",#r,r)
 +#define pr_int(fp,i)  fprintf((fp),"%s: %d\n",#i,i)
 +#define pr_bool(fp,b) fprintf((fp),"%s: %s\n",#b,bool_names[b])
 +
 +void pr_forcerec(FILE *fp,t_forcerec *fr,t_commrec *cr)
 +{
 +  int i;
 +
 +  pr_real(fp,fr->rlist);
 +  pr_real(fp,fr->rcoulomb);
 +  pr_real(fp,fr->fudgeQQ);
 +  pr_bool(fp,fr->bGrid);
 +  pr_bool(fp,fr->bTwinRange);
 +  /*pr_int(fp,fr->cg0);
 +    pr_int(fp,fr->hcg);*/
 +  for(i=0; i<fr->nnblists; i++)
 +    pr_int(fp,fr->nblists[i].table_elec_vdw.n);
 +  pr_real(fp,fr->rcoulomb_switch);
 +  pr_real(fp,fr->rcoulomb);
 +  
 +  fflush(fp);
 +}
 +
 +void forcerec_set_excl_load(t_forcerec *fr,
 +                            const gmx_localtop_t *top,const t_commrec *cr)
 +{
 +    const int *ind,*a;
 +    int t,i,j,ntot,n,ntarget;
 +
 +    if (cr != NULL && PARTDECOMP(cr))
 +    {
 +        /* No OpenMP with particle decomposition */
 +        pd_at_range(cr,
 +                    &fr->excl_load[0],
 +                    &fr->excl_load[1]);
 +
 +        return;
 +    }
 +
 +    ind = top->excls.index;
 +    a   = top->excls.a;
 +
 +    ntot = 0;
 +    for(i=0; i<top->excls.nr; i++)
 +    {
 +        for(j=ind[i]; j<ind[i+1]; j++)
 +        {
 +            if (a[j] > i)
 +            {
 +                ntot++;
 +            }
 +        }
 +    }
 +
 +    fr->excl_load[0] = 0;
 +    n = 0;
 +    i = 0;
 +    for(t=1; t<=fr->nthreads; t++)
 +    {
 +        ntarget = (ntot*t)/fr->nthreads;
 +        while(i < top->excls.nr && n < ntarget)
 +        {
 +            for(j=ind[i]; j<ind[i+1]; j++)
 +            {
 +                if (a[j] > i)
 +                {
 +                    n++;
 +                }
 +            }
 +            i++;
 +        }
 +        fr->excl_load[t] = i;
 +    }
 +}
 +
index 230ead9eb47414763061f07ec5ee27e9bff88ad5,0000000000000000000000000000000000000000..cebefc09c5e1eeb69a408f52250957fe00e987ad
mode 100644,000000..100644
--- /dev/null
@@@ -1,841 -1,0 +1,841 @@@
-         fprintf(fplog, "\n Force evaluation time GPU/CPU: %.3f ms/%.3f ms = %.3f\n",
 +/*  -*- mode: c; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4; c-file-style: "stroustrup"; -*-
 + *
 + * 
 + *                This source code is part of
 + * 
 + *                 G   R   O   M   A   C   S
 + * 
 + *          GROningen MAchine for Chemical Simulations
 + * 
 + * Written by David van der Spoel, Erik Lindahl, Berk Hess, and others.
 + * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 + * Copyright (c) 2001-2008, The GROMACS development team,
 + * check out http://www.gromacs.org for more information.
 + 
 + * This program is free software; you can redistribute it and/or
 + * modify it under the terms of the GNU General Public License
 + * as published by the Free Software Foundation; either version 2
 + * of the License, or (at your option) any later version.
 + * 
 + * If you want to redistribute modifications, please consider that
 + * scientific software is very special. Version control is crucial -
 + * bugs must be traceable. We will be happy to consider code for
 + * inclusion in the official distribution, but derived work must not
 + * be called official GROMACS. Details are found in the README & COPYING
 + * files - if they are missing, get the official version at www.gromacs.org.
 + * 
 + * To help us fund GROMACS development, we humbly ask that you cite
 + * the papers on the package - you can find them in the top README file.
 + * 
 + * For more info, check our website at http://www.gromacs.org
 + * 
 + * And Hey:
 + * Gallium Rubidium Oxygen Manganese Argon Carbon Silicon
 + */
 +
 +
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <string.h>
 +#include "gmx_wallcycle.h"
 +#include "gmx_cyclecounter.h"
 +#include "smalloc.h"
 +#include "gmx_fatal.h"
 +#include "md_logging.h"
 +#include "string2.h"
 +
 +#ifdef GMX_LIB_MPI
 +#include <mpi.h>
 +#endif
 +#ifdef GMX_THREAD_MPI
 +#include "tmpi.h"
 +#endif
 +
 +/* DEBUG_WCYCLE adds consistency checking for the counters.
 + * It checks if you stop a counter different from the last
 + * one that was opened and if you do nest too deep.
 + */
 +/* #define DEBUG_WCYCLE */
 +
 +typedef struct
 +{
 +    int          n;
 +    gmx_cycles_t c;
 +    gmx_cycles_t start;
 +    gmx_cycles_t last;
 +} wallcc_t;
 +
 +typedef struct gmx_wallcycle
 +{
 +    wallcc_t     *wcc;
 +    /* variables for testing/debugging */
 +    gmx_bool         wc_barrier;
 +    wallcc_t     *wcc_all;
 +    int          wc_depth;
 +#ifdef DEBUG_WCYCLE
 +#define DEPTH_MAX 6
 +    int          counterlist[DEPTH_MAX];
 +    int          count_depth;
 +#endif
 +    int          ewc_prev;
 +    gmx_cycles_t cycle_prev;
 +    gmx_large_int_t   reset_counters;
 +#ifdef GMX_MPI
 +    MPI_Comm     mpi_comm_mygroup;
 +#endif
 +    int          nthreads_pp;
 +    int          nthreads_pme;
 +#ifdef GMX_CYCLE_SUBCOUNTERS
 +    wallcc_t     *wcsc;
 +#endif
 +    double       *cycles_sum;
 +} gmx_wallcycle_t_t;
 +
 +/* Each name should not exceed 19 characters */
 +static const char *wcn[ewcNR] =
 +{ "Run", "Step", "PP during PME", "Domain decomp.", "DD comm. load",
 +  "DD comm. bounds", "Vsite constr.", "Send X to PME", "Neighbor search", "Launch GPU ops.",
 +  "Comm. coord.", "Born radii", "Force", "Wait + Comm. F", "PME mesh",
 +  "PME redist. X/F", "PME spread/gather", "PME 3D-FFT", "PME 3D-FFT Comm.", "PME solve",
 +  "PME wait for PP", "Wait + Recv. PME F", "Wait GPU nonlocal", "Wait GPU local", "NB X/F buffer ops.",
 +  "Vsite spread", "Write traj.", "Update", "Constraints", "Comm. energies",
 +  "Enforced rotation", "Add rot. forces", "Test" };
 +
 +static const char *wcsn[ewcsNR] =
 +{ "DD redist.", "DD NS grid + sort", "DD setup comm.",
 +  "DD make top.", "DD make constr.", "DD top. other",
 +  "NS grid local", "NS grid non-loc.", "NS search local", "NS search non-loc.",
 +  "Bonded F", "Nonbonded F", "Ewald F correction",
 +  "NB X buffer ops.", "NB F buffer ops."
 +};
 +
 +gmx_bool wallcycle_have_counter(void)
 +{
 +  return gmx_cycles_have_counter();
 +}
 +
 +gmx_wallcycle_t wallcycle_init(FILE *fplog,int resetstep,t_commrec *cr, 
 +                               int nthreads_pp, int nthreads_pme)
 +{
 +    gmx_wallcycle_t wc;
 +    
 +    
 +    if (!wallcycle_have_counter())
 +    {
 +        return NULL;
 +    }
 +
 +    snew(wc,1);
 +
 +    wc->wc_barrier          = FALSE;
 +    wc->wcc_all             = NULL;
 +    wc->wc_depth            = 0;
 +    wc->ewc_prev            = -1;
 +    wc->reset_counters      = resetstep;
 +    wc->nthreads_pp         = nthreads_pp;
 +    wc->nthreads_pme        = nthreads_pme;
 +    wc->cycles_sum          = NULL;
 +
 +#ifdef GMX_MPI
 +    if (PAR(cr) && getenv("GMX_CYCLE_BARRIER") != NULL)
 +    {
 +        if (fplog) 
 +        {
 +            fprintf(fplog,"\nWill call MPI_Barrier before each cycle start/stop call\n\n");
 +        }
 +        wc->wc_barrier = TRUE;
 +        wc->mpi_comm_mygroup = cr->mpi_comm_mygroup;
 +    }
 +#endif
 +
 +    snew(wc->wcc,ewcNR);
 +    if (getenv("GMX_CYCLE_ALL") != NULL)
 +    {
 +        if (fplog) 
 +        {
 +            fprintf(fplog,"\nWill time all the code during the run\n\n");
 +        }
 +        snew(wc->wcc_all,ewcNR*ewcNR);
 +    }
 +
 +#ifdef GMX_CYCLE_SUBCOUNTERS
 +    snew(wc->wcsc,ewcsNR);
 +#endif
 +
 +#ifdef DEBUG_WCYCLE
 +    wc->count_depth = 0;
 +#endif
 +
 +    return wc;
 +}
 +
 +void wallcycle_destroy(gmx_wallcycle_t wc)
 +{
 +    if (wc == NULL)
 +    {
 +        return;
 +    }
 +    
 +    if (wc->wcc != NULL)
 +    {
 +        sfree(wc->wcc);
 +    }
 +    if (wc->wcc_all != NULL)
 +    {
 +        sfree(wc->wcc_all);
 +    }
 +#ifdef GMX_CYCLE_SUBCOUNTERS
 +    if (wc->wcsc != NULL)
 +    {
 +        sfree(wc->wcsc);
 +    }
 +#endif
 +    sfree(wc);
 +}
 +
 +static void wallcycle_all_start(gmx_wallcycle_t wc,int ewc,gmx_cycles_t cycle)
 +{
 +    wc->ewc_prev = ewc;
 +    wc->cycle_prev = cycle;
 +}
 +
 +static void wallcycle_all_stop(gmx_wallcycle_t wc,int ewc,gmx_cycles_t cycle)
 +{
 +    wc->wcc_all[wc->ewc_prev*ewcNR+ewc].n += 1;
 +    wc->wcc_all[wc->ewc_prev*ewcNR+ewc].c += cycle - wc->cycle_prev;
 +}
 +
 +
 +#ifdef DEBUG_WCYCLE
 +static void debug_start_check(gmx_wallcycle_t wc, int ewc)
 +{
 +    /* fprintf(stderr,"wcycle_start depth %d, %s\n",wc->count_depth,wcn[ewc]); */
 +
 +    if (wc->count_depth < 0 || wc->count_depth >= DEPTH_MAX)
 +    {
 +        gmx_fatal(FARGS,"wallcycle counter depth out of range: %d",
 +                  wc->count_depth);
 +    }
 +    wc->counterlist[wc->count_depth] = ewc;
 +    wc->count_depth++;
 +}
 +
 +static void debug_stop_check(gmx_wallcycle_t wc, int ewc)
 +{
 +    wc->count_depth--;
 +
 +    /* fprintf(stderr,"wcycle_stop depth %d, %s\n",wc->count_depth,wcn[ewc]); */
 +
 +    if (wc->count_depth < 0)
 +    {
 +        gmx_fatal(FARGS,"wallcycle counter depth out of range when stopping %s: %d",wcn[ewc],wc->count_depth);
 +    }
 +    if (wc->counterlist[wc->count_depth] != ewc)
 +    {
 +        gmx_fatal(FARGS,"wallcycle mismatch at stop, start %s, stop %s",
 +                  wcn[wc->counterlist[wc->count_depth]],wcn[ewc]);
 +    }
 +}
 +#endif
 +
 +void wallcycle_start(gmx_wallcycle_t wc, int ewc)
 +{
 +    gmx_cycles_t cycle;
 +
 +    if (wc == NULL)
 +    {
 +        return;
 +    }
 +
 +#ifdef GMX_MPI
 +    if (wc->wc_barrier)
 +    {
 +        MPI_Barrier(wc->mpi_comm_mygroup);
 +    }
 +#endif
 +
 +#ifdef DEBUG_WCYCLE
 +    debug_start_check(wc,ewc);
 +#endif
 +
 +    cycle = gmx_cycles_read();
 +    wc->wcc[ewc].start = cycle;
 +    if (wc->wcc_all != NULL)
 +    {
 +        wc->wc_depth++;
 +        if (ewc == ewcRUN)
 +        {
 +            wallcycle_all_start(wc,ewc,cycle);
 +        }
 +        else if (wc->wc_depth == 3)
 +        {
 +            wallcycle_all_stop(wc,ewc,cycle);
 +        }
 +    }
 +}
 +
 +void wallcycle_start_nocount(gmx_wallcycle_t wc, int ewc)
 +{
 +    if (wc == NULL)
 +    {
 +        return;
 +    }
 +
 +    wallcycle_start(wc, ewc);
 +    wc->wcc[ewc].n--;
 +}
 +
 +double wallcycle_stop(gmx_wallcycle_t wc, int ewc)
 +{
 +    gmx_cycles_t cycle,last;
 +    
 +    if (wc == NULL)
 +    {
 +        return 0;
 +    }
 +    
 +#ifdef GMX_MPI
 +    if (wc->wc_barrier)
 +    {
 +        MPI_Barrier(wc->mpi_comm_mygroup);
 +    }
 +#endif
 +
 +#ifdef DEBUG_WCYCLE
 +    debug_stop_check(wc,ewc);
 +#endif
 +    
 +    cycle = gmx_cycles_read();
 +    last = cycle - wc->wcc[ewc].start;
 +    wc->wcc[ewc].c += last;
 +    wc->wcc[ewc].n++;
 +    if (wc->wcc_all)
 +    {
 +        wc->wc_depth--;
 +        if (ewc == ewcRUN)
 +        {
 +            wallcycle_all_stop(wc,ewc,cycle);
 +        }
 +        else if (wc->wc_depth == 2)
 +        {
 +            wallcycle_all_start(wc,ewc,cycle);
 +        }
 +    }
 +
 +    return last;
 +}
 +
 +void wallcycle_reset_all(gmx_wallcycle_t wc)
 +{
 +    int i;
 +
 +    if (wc == NULL)
 +    {
 +        return;
 +    }
 +
 +    for(i=0; i<ewcNR; i++)
 +    {
 +        wc->wcc[i].n = 0;
 +        wc->wcc[i].c = 0;
 +    }
 +    if (wc->wcc_all)
 +    {
 +        for(i=0; i<ewcNR*ewcNR; i++)
 +        {
 +            wc->wcc_all[i].n = 0;
 +            wc->wcc_all[i].c = 0;
 +        }
 +    }
 +#ifdef GMX_CYCLE_SUBCOUNTERS
 +    for (i=0; i<ewcsNR; i++)
 +    {
 +        wc->wcsc[i].n = 0;
 +        wc->wcsc[i].c = 0;
 +    }
 +#endif
 +}
 +
 +static gmx_bool is_pme_counter(int ewc)
 +{
 +    return (ewc >= ewcPMEMESH && ewc <= ewcPMEWAITCOMM);
 +}
 +
 +static gmx_bool is_pme_subcounter(int ewc)
 +{
 +    return (ewc >= ewcPME_REDISTXF && ewc < ewcPMEWAITCOMM);
 +}
 +
 +void wallcycle_sum(t_commrec *cr, gmx_wallcycle_t wc)
 +{
 +    wallcc_t *wcc;
 +    double *cycles;
 +    double cycles_n[ewcNR+ewcsNR],buf[ewcNR+ewcsNR],*cyc_all,*buf_all;
 +    int    i,j;
 +    int    nsum;
 +
 +    if (wc == NULL)
 +    {
 +        return;
 +    }
 +
 +    snew(wc->cycles_sum,ewcNR+ewcsNR);
 +    cycles = wc->cycles_sum;
 +
 +    wcc = wc->wcc;
 +
 +    for(i=0; i<ewcNR; i++)
 +    {
 +        if (is_pme_counter(i) || (i==ewcRUN && cr->duty == DUTY_PME))
 +        {
 +            wcc[i].c *= wc->nthreads_pme;
 +
 +            if (wc->wcc_all)
 +            {
 +                for(j=0; j<ewcNR; j++)
 +                {
 +                    wc->wcc_all[i*ewcNR+j].c *= wc->nthreads_pme;
 +                }
 +            }
 +        }
 +        else
 +        {
 +            wcc[i].c *= wc->nthreads_pp;
 +
 +            if (wc->wcc_all)
 +            {
 +                for(j=0; j<ewcNR; j++)
 +                {
 +                    wc->wcc_all[i*ewcNR+j].c *= wc->nthreads_pp;
 +                }
 +            }
 +        }
 +    }
 +
 +    if (wcc[ewcDDCOMMLOAD].n > 0)
 +    {
 +        wcc[ewcDOMDEC].c -= wcc[ewcDDCOMMLOAD].c;
 +    }
 +    if (wcc[ewcDDCOMMBOUND].n > 0)
 +    {
 +        wcc[ewcDOMDEC].c -= wcc[ewcDDCOMMBOUND].c;
 +    }
 +    if (wcc[ewcPME_FFTCOMM].n > 0)
 +    {
 +        wcc[ewcPME_FFT].c -= wcc[ewcPME_FFTCOMM].c;
 +    }
 +
 +    if (cr->npmenodes == 0)
 +    {
 +        /* All nodes do PME (or no PME at all) */
 +        if (wcc[ewcPMEMESH].n > 0)
 +        {
 +            wcc[ewcFORCE].c -= wcc[ewcPMEMESH].c;
 +        }
 +    }
 +    else
 +    {
 +        /* The are PME-only nodes */
 +        if (wcc[ewcPMEMESH].n > 0)
 +        {
 +            /* This must be a PME only node, calculate the Wait + Comm. time */
 +            wcc[ewcPMEWAITCOMM].c = wcc[ewcRUN].c - wcc[ewcPMEMESH].c;
 +        }
 +    }
 +    
 +    /* Store the cycles in a double buffer for summing */
 +    for(i=0; i<ewcNR; i++)
 +    {
 +        cycles_n[i] = (double)wcc[i].n;
 +        cycles[i]   = (double)wcc[i].c;
 +    }
 +    nsum = ewcNR;
 +#ifdef GMX_CYCLE_SUBCOUNTERS
 +    for(i=0; i<ewcsNR; i++)
 +    {
 +        wc->wcsc[i].c *= wc->nthreads_pp;
 +        cycles_n[ewcNR+i] = (double)wc->wcsc[i].n;
 +        cycles[ewcNR+i]   = (double)wc->wcsc[i].c;
 +    }
 +    nsum += ewcsNR;
 +#endif   
 +    
 +#ifdef GMX_MPI
 +    if (cr->nnodes > 1)
 +    {
 +        MPI_Allreduce(cycles_n,buf,nsum,MPI_DOUBLE,MPI_MAX,
 +                      cr->mpi_comm_mysim);
 +        for(i=0; i<ewcNR; i++)
 +        {
 +            wcc[i].n = (int)(buf[i] + 0.5);
 +        }
 +#ifdef GMX_CYCLE_SUBCOUNTERS
 +        for(i=0; i<ewcsNR; i++)
 +        {
 +            wc->wcsc[i].n = (int)(buf[ewcNR+i] + 0.5);
 +        }
 +#endif   
 +
 +        MPI_Allreduce(cycles,buf,nsum,MPI_DOUBLE,MPI_SUM,
 +                      cr->mpi_comm_mysim);
 +        for(i=0; i<nsum; i++)
 +        {
 +            cycles[i] = buf[i];
 +        }
 +
 +        if (wc->wcc_all != NULL)
 +        {
 +            snew(cyc_all,ewcNR*ewcNR);
 +            snew(buf_all,ewcNR*ewcNR);
 +            for(i=0; i<ewcNR*ewcNR; i++)
 +            {
 +                cyc_all[i] = wc->wcc_all[i].c;
 +            }
 +            MPI_Allreduce(cyc_all,buf_all,ewcNR*ewcNR,MPI_DOUBLE,MPI_SUM,
 +                          cr->mpi_comm_mysim);
 +            for(i=0; i<ewcNR*ewcNR; i++)
 +            {
 +                wc->wcc_all[i].c = buf_all[i];
 +            }
 +            sfree(buf_all);
 +            sfree(cyc_all);
 +        }
 +    }
 +#endif
 +}
 +
 +static void print_cycles(FILE *fplog, double c2t, const char *name, 
 +                         int nnodes_tot,int nnodes, int nthreads,
 +                         int n, double c, double tot)
 +{
 +    char num[11];
 +    char thstr[6];
 +    double wallt;
 +  
 +    if (c > 0)
 +    {
 +        if (n > 0)
 +        {
 +            snprintf(num,sizeof(num),"%10d",n);
 +            if (nthreads < 0)
 +                snprintf(thstr, sizeof(thstr), "N/A");
 +            else
 +                snprintf(thstr, sizeof(thstr), "%4d", nthreads);
 +        }
 +        else
 +        {
 +            sprintf(num,"          ");
 +            sprintf(thstr, "    ");
 +        }
 +        wallt = c*c2t*nnodes_tot/(double)nnodes;
 +        fprintf(fplog," %-19s %4d %4s %10s  %10.3f %12.3f   %5.1f\n",
 +                name,nnodes,thstr,num,wallt,c*1e-9,100*c/tot);
 +    }
 +}
 +
 +static void print_gputimes(FILE *fplog, const char *name, 
 +                           int n, double t, double tot_t)
 +{
 +    char num[11];
 +    char avg_perf[11];
 +
 +    if (n > 0)
 +    {
 +        snprintf(num, sizeof(num), "%10d", n);
 +        snprintf(avg_perf, sizeof(avg_perf), "%10.3f", t/n);
 +    }
 +    else
 +    {
 +      sprintf(num,"          ");
 +      sprintf(avg_perf,"          ");
 +    }
 +    if (t != tot_t)
 +    {
 +        fprintf(fplog, " %-29s %10s%12.3f   %s   %5.1f\n",
 +                name, num, t/1000, avg_perf, 100 * t/tot_t); 
 +    }
 +    else
 +    {
 +         fprintf(fplog, " %-29s %10s%12.3f   %s   %5.1f\n",
 +               name, "", t/1000, avg_perf, 100.0); 
 +    }
 +}
 +
 +void wallcycle_print(FILE *fplog, int nnodes, int npme, double realtime,
 +                     gmx_wallcycle_t wc, wallclock_gpu_t *gpu_t)
 +{
 +    double *cycles;
 +    double c2t,tot,tot_gpu,tot_cpu_overlap,gpu_cpu_ratio,sum,tot_k;
 +    int    i,j,npp,nth_pp,nth_pme;
 +    char   buf[STRLEN];
 +    const char *hline = "-----------------------------------------------------------------------------";
 +    
 +    if (wc == NULL)
 +    {
 +        return;
 +    }
 +
 +    nth_pp  = wc->nthreads_pp;
 +    nth_pme = wc->nthreads_pme;
 +
 +    cycles = wc->cycles_sum;
 +
 +    if (npme > 0)
 +    {
 +        npp = nnodes - npme;
 +    }
 +    else
 +    {
 +        npp  = nnodes;
 +        npme = nnodes;
 +    }
 +    tot = cycles[ewcRUN];
 +
 +    /* Conversion factor from cycles to seconds */
 +    if (tot > 0)
 +    {
 +        c2t = realtime/tot;
 +    }
 +    else
 +    {
 +        c2t = 0;
 +    }
 +
 +    fprintf(fplog,"\n     R E A L   C Y C L E   A N D   T I M E   A C C O U N T I N G\n\n");
 +
 +    fprintf(fplog," Computing:         Nodes   Th.     Count  Wall t (s)     G-Cycles       %c\n",'%');
 +    fprintf(fplog,"%s\n",hline);
 +    sum = 0;
 +    for(i=ewcPPDURINGPME+1; i<ewcNR; i++)
 +    {
 +        if (!is_pme_subcounter(i))
 +        {
 +            print_cycles(fplog,c2t,wcn[i],nnodes,
 +                         is_pme_counter(i) ? npme : npp,
 +                         is_pme_counter(i) ? nth_pme : nth_pp, 
 +                         wc->wcc[i].n,cycles[i],tot);
 +            sum += cycles[i];
 +        }
 +    }
 +    if (wc->wcc_all != NULL)
 +    {
 +        for(i=0; i<ewcNR; i++)
 +        {
 +            for(j=0; j<ewcNR; j++)
 +            {
 +                snprintf(buf,9,"%-9s",wcn[i]);
 +                buf[9] = ' ';
 +                snprintf(buf+10,9,"%-9s",wcn[j]);
 +                buf[19] = '\0';
 +                print_cycles(fplog,c2t,buf,nnodes,
 +                             is_pme_counter(i) ? npme : npp,
 +                             is_pme_counter(i) ? nth_pme : nth_pp,
 +                             wc->wcc_all[i*ewcNR+j].n,
 +                             wc->wcc_all[i*ewcNR+j].c,
 +                             tot);
 +            }
 +        }
 +    }
 +    print_cycles(fplog,c2t,"Rest",npp,npp,-1,0,tot-sum,tot);
 +    fprintf(fplog,"%s\n",hline);
 +    print_cycles(fplog,c2t,"Total",nnodes,nnodes,-1,0,tot,tot);
 +    fprintf(fplog,"%s\n",hline);
 +    
 +    if (wc->wcc[ewcPMEMESH].n > 0)
 +    {
 +        fprintf(fplog,"%s\n",hline);
 +        for(i=ewcPPDURINGPME+1; i<ewcNR; i++)
 +        {
 +            if (is_pme_subcounter(i))
 +            {
 +                print_cycles(fplog,c2t,wcn[i],nnodes,
 +                             is_pme_counter(i) ? npme : npp,
 +                             is_pme_counter(i) ? nth_pme : nth_pp,
 +                             wc->wcc[i].n,cycles[i],tot);
 +            }
 +        }
 +        fprintf(fplog,"%s\n",hline);
 +    }
 +
 +#ifdef GMX_CYCLE_SUBCOUNTERS
 +    fprintf(fplog,"%s\n",hline);
 +    for(i=0; i<ewcsNR; i++)
 +    {
 +        print_cycles(fplog,c2t,wcsn[i],nnodes,npp,nth_pp,
 +                     wc->wcsc[i].n,cycles[ewcNR+i],tot);
 +    }
 +    fprintf(fplog,"%s\n",hline);
 +#endif
 +
 +    /* print GPU timing summary */
 +    if (gpu_t)
 +    {
 +        const char *k_log_str[2][2] = {
 +                {"Nonbonded F kernel", "Nonbonded F+ene k."},
 +                {"Nonbonded F+prune k.", "Nonbonded F+ene+prune k."}};
 +
 +        tot_gpu = gpu_t->pl_h2d_t + gpu_t->nb_h2d_t + gpu_t->nb_d2h_t;
 +
 +        /* add up the kernel timings */
 +        tot_k = 0.0;
 +        for (i = 0; i < 2; i++)
 +        {
 +            for(j = 0; j < 2; j++)
 +            {
 +                tot_k += gpu_t->ktime[i][j].t;
 +            }
 +        }
 +        tot_gpu += tot_k;
 +    
 +        tot_cpu_overlap = wc->wcc[ewcFORCE].c;
 +        if (wc->wcc[ewcPMEMESH].n > 0)
 +        {
 +            tot_cpu_overlap += wc->wcc[ewcPMEMESH].c;
 +        }
 +        tot_cpu_overlap *= c2t * 1000; /* convert s to ms */
 +
 +        fprintf(fplog, "\n GPU timings\n%s\n", hline);
 +        fprintf(fplog," Computing:                         Count  Wall t (s)      ms/step       %c\n",'%');
 +        fprintf(fplog, "%s\n", hline);
 +        print_gputimes(fplog, "Pair list H2D",
 +                gpu_t->pl_h2d_c, gpu_t->pl_h2d_t, tot_gpu);
 +         print_gputimes(fplog, "X / q H2D", 
 +                gpu_t->nb_c, gpu_t->nb_h2d_t, tot_gpu);
 +
 +        for (i = 0; i < 2; i++)
 +        {
 +            for(j = 0; j < 2; j++)
 +            {
 +                if (gpu_t->ktime[i][j].c)
 +                {
 +                    print_gputimes(fplog, k_log_str[i][j],
 +                            gpu_t->ktime[i][j].c, gpu_t->ktime[i][j].t, tot_gpu);
 +                }
 +            }
 +        }        
 +
 +        print_gputimes(fplog, "F D2H",  gpu_t->nb_c, gpu_t->nb_d2h_t, tot_gpu);
 +        fprintf(fplog, "%s\n", hline);
 +        print_gputimes(fplog, "Total ", gpu_t->nb_c, tot_gpu, tot_gpu);
 +        fprintf(fplog, "%s\n", hline);
 +
 +        gpu_cpu_ratio = tot_gpu/tot_cpu_overlap;
-                                       "NOTE: The GPU has >25%% less load than the CPU. This imbalance causes\n"
++        fprintf(fplog, "\nForce evaluation time GPU/CPU: %.3f ms/%.3f ms = %.3f\n",
 +                tot_gpu/gpu_t->nb_c, tot_cpu_overlap/wc->wcc[ewcFORCE].n,
 +                gpu_cpu_ratio);
 +
 +        /* only print notes related to CPU-GPU load balance with PME */
 +        if (wc->wcc[ewcPMEMESH].n > 0)
 +        {
 +            fprintf(fplog, "For optimal performance this ratio should be close to 1!\n");
 +
 +            /* print note if the imbalance is high with PME case in which
 +             * CPU-GPU load balancing is possible */
 +            if (gpu_cpu_ratio < 0.75 || gpu_cpu_ratio > 1.2)
 +            {
 +                /* Only the sim master calls this function, so always print to stderr */
 +                if (gpu_cpu_ratio < 0.75)
 +                {
 +                    if (npp > 1)
 +                    {
 +                        /* The user could have used -notunepme,
 +                         * but we currently can't check that here.
 +                         */
 +                        md_print_warn(NULL,fplog,
-                                       "      In that case, try setting the DD grid manually (-dd) or lowering -dds.\n");
++                                      "\nNOTE: The GPU has >25%% less load than the CPU. This imbalance causes\n"
 +                                      "      performance loss. Maybe the domain decomposition limits the PME tuning.\n"
-                                       "NOTE: The GPU has >25%% less load than the CPU. This imbalance causes\n"
-                                       "      performance loss.\n");
++                                      "      In that case, try setting the DD grid manually (-dd) or lowering -dds.");
 +                    }
 +                    else
 +                    {
 +                        /* We should not end up here, unless the box is
 +                         * too small for increasing the cut-off for PME tuning.
 +                         */
 +                        md_print_warn(NULL,fplog,
-                                   "NOTE: The GPU has >20%% more load than the CPU. This imbalance causes\n"
-                                   "      performance loss, consider using a shorter cut-off and a finer PME grid.\n");
++                                      "\nNOTE: The GPU has >25%% less load than the CPU. This imbalance causes\n"
++                                      "      performance loss.");
 +                    }
 +                }
 +                if (gpu_cpu_ratio > 1.2)
 +                {
 +                    md_print_warn(NULL,fplog,
++                                  "\nNOTE: The GPU has >20%% more load than the CPU. This imbalance causes\n"
++                                  "      performance loss, consider using a shorter cut-off and a finer PME grid.");
 +                }
 +            }
 +        }
 +    }
 +
 +    if (wc->wcc[ewcNB_XF_BUF_OPS].n > 0 &&
 +        (cycles[ewcDOMDEC] > tot*0.1 ||
 +         cycles[ewcNS] > tot*0.1))
 +    {
 +        /* Only the sim master calls this function, so always print to stderr */
 +        if (wc->wcc[ewcDOMDEC].n == 0)
 +        {
 +            md_print_warn(NULL,fplog,
 +                          "NOTE: %d %% of the run time was spent in pair search,\n"
 +                          "      you might want to increase nstlist (this has no effect on accuracy)\n",
 +                          (int)(100*cycles[ewcNS]/tot+0.5));
 +        }
 +        else
 +        {
 +            md_print_warn(NULL,fplog,
 +                          "NOTE: %d %% of the run time was spent in domain decomposition,\n"
 +                          "      %d %% of the run time was spent in pair search,\n"
 +                          "      you might want to increase nstlist (this has no effect on accuracy)\n",
 +                          (int)(100*cycles[ewcDOMDEC]/tot+0.5),
 +                          (int)(100*cycles[ewcNS]/tot+0.5));
 +        }
 +    }
 +
 +    if (cycles[ewcMoveE] > tot*0.05)
 +    {
 +        /* Only the sim master calls this function, so always print to stderr */
 +        md_print_warn(NULL,fplog,
 +                      "NOTE: %d %% of the run time was spent communicating energies,\n"
 +                      "      you might want to use the -gcom option of mdrun\n",
 +                      (int)(100*cycles[ewcMoveE]/tot+0.5));
 +    }
 +}
 +
 +extern gmx_large_int_t wcycle_get_reset_counters(gmx_wallcycle_t wc)
 +{
 +    if (wc == NULL)
 +    {
 +        return -1;
 +    }
 +    
 +    return wc->reset_counters;
 +}
 +
 +extern void wcycle_set_reset_counters(gmx_wallcycle_t wc, gmx_large_int_t reset_counters)
 +{
 +    if (wc == NULL)
 +        return;
 +
 +    wc->reset_counters = reset_counters;
 +}
 +
 +#ifdef GMX_CYCLE_SUBCOUNTERS
 +
 +void wallcycle_sub_start(gmx_wallcycle_t wc, int ewcs)
 +{
 +    if (wc != NULL)
 +    {
 +        wc->wcsc[ewcs].start = gmx_cycles_read();
 +    }
 +}
 +
 +void wallcycle_sub_stop(gmx_wallcycle_t wc, int ewcs)
 +{
 +    if (wc != NULL)
 +    {
 +        wc->wcsc[ewcs].c += gmx_cycles_read() - wc->wcsc[ewcs].start;
 +        wc->wcsc[ewcs].n++;
 +    }
 +}
 +
 +#endif /* GMX_CYCLE_SUBCOUNTERS */
index 2219580e0e055b48f42b0fc0b2ab177d0996768f,0000000000000000000000000000000000000000..73aab70dfbf0f3698ca4785931435f0562c24ecd
mode 100644,000000..100644
--- /dev/null
@@@ -1,191 -1,0 +1,191 @@@
-   if (EI_SD(ir->eI) || ir->eI == eiBD || ir->etc == etcVRESCALE) {
 +/* -*- mode: c; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4; c-file-style: "stroustrup"; -*-
 + *
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + *          GROningen MAchine for Chemical Simulations
 + *
 + *                        VERSION 3.2.0
 + * Written by David van der Spoel, Erik Lindahl, Berk Hess, and others.
 + * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 + * Copyright (c) 2001-2004, The GROMACS development team,
 + * check out http://www.gromacs.org for more information.
 +
 + * This program is free software; you can redistribute it and/or
 + * modify it under the terms of the GNU General Public License
 + * as published by the Free Software Foundation; either version 2
 + * of the License, or (at your option) any later version.
 + *
 + * If you want to redistribute modifications, please consider that
 + * scientific software is very special. Version control is crucial -
 + * bugs must be traceable. We will be happy to consider code for
 + * inclusion in the official distribution, but derived work must not
 + * be called official GROMACS. Details are found in the README & COPYING
 + * files - if they are missing, get the official version at www.gromacs.org.
 + *
 + * To help us fund GROMACS development, we humbly ask that you cite
 + * the papers on the package - you can find them in the top README file.
 + *
 + * For more info, check our website at http://www.gromacs.org
 + *
 + * And Hey:
 + * GROwing Monsters And Cloning Shrimps
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <stdio.h>
 +#include "typedefs.h"
 +#include "tpxio.h"
 +#include "smalloc.h"
 +#include "vec.h"
 +#include "main.h"
 +#include "mvdata.h"
 +#include "gmx_fatal.h"
 +#include "symtab.h"
 +#include "txtdump.h"
 +#include "mdatoms.h"
 +#include "mdrun.h"
 +#include "statutil.h"
 +#include "names.h"
 +#include "calcgrid.h"
 +#include "gmx_random.h"
 +#include "update.h"
 +#include "mdebin.h"
 +
 +#define BUFSIZE       256
 +
 +#define NOT_FINISHED(l1,l2) \
 +  printf("not finished yet: lines %d .. %d in %s\n",l1,l2,__FILE__)
 +
 +static char *int_title(const char *title,int nodeid,char buf[], int size)
 +{
 +  sprintf(buf,"%s (%d)",title,nodeid);
 +
 +  return buf;
 +}
 +
 +void set_state_entries(t_state *state,const t_inputrec *ir,int nnodes)
 +{
 +  int nnhpres;
 +
 +  /* The entries in the state in the tpx file might not correspond
 +   * with what is needed, so we correct this here.
 +   */
 +  state->flags = 0;
 +  if (ir->efep != efepNO || ir->bExpanded)
 +  {
 +      state->flags |= (1<<estLAMBDA);
 +      state->flags |= (1<<estFEPSTATE);
 +  }
 +  state->flags |= (1<<estX);
 +  if (state->lambda==NULL)
 +    {
 +      snew(state->lambda,efptNR);
 +    }
 +  if (state->x == NULL)
 +    snew(state->x,state->nalloc);
 +  if (EI_DYNAMICS(ir->eI)) {
 +    state->flags |= (1<<estV);
 +    if (state->v == NULL)
 +      snew(state->v,state->nalloc);
 +  }
 +  if (ir->eI == eiSD2) {
 +    state->flags |= (1<<estSDX);
 +    if (state->sd_X == NULL) {
 +      /* sd_X is not stored in the tpx file, so we need to allocate it */
 +      snew(state->sd_X,state->nalloc);
 +    }
 +  }
 +    if (ir->eI == eiCG)
 +    {
 +        state->flags |= (1<<estCGP);
 +        if (state->cg_p == NULL)
 +        {
 +            /* cg_p is not stored in the tpx file, so we need to allocate it */
 +            snew(state->cg_p,state->nalloc);
 +        }
 +    }
-     if (EI_SD(ir->eI) || ir->eI == eiBD) {
++    if (EI_SD(ir->eI) || ir->eI == eiBD || ir->etc == etcVRESCALE || ETC_ANDERSEN(ir->etc)) {
 +    state->nrng  = gmx_rng_n();
 +    state->nrngi = 1;
-     if (inputrec->eI == eiBD || EI_SD(inputrec->eI)) {
++    if (EI_SD(ir->eI) || ir->eI == eiBD || ETC_ANDERSEN(ir->etc)) {
 +      /* This will be correct later with DD */
 +      state->nrng  *= nnodes;
 +      state->nrngi *= nnodes;
 +    }
 +    state->flags |= ((1<<estLD_RNG) | (1<<estLD_RNGI));
 +    snew(state->ld_rng, state->nrng);
 +    snew(state->ld_rngi,state->nrngi);
 +  } else {
 +    state->nrng = 0;
 +  }
 +
 +  if (ir->bExpanded)
 +  {
 +      state->nmcrng  = gmx_rng_n();
 +      snew(state->mc_rng,state->nmcrng);
 +      snew(state->mc_rngi,1);
 +  }
 +
 +  state->nnhpres = 0;
 +  if (ir->ePBC != epbcNONE) {
 +      state->flags |= (1<<estBOX);
 +      if (PRESERVE_SHAPE(*ir)) {
 +          state->flags |= (1<<estBOX_REL);
 +      }
 +      if ((ir->epc == epcPARRINELLORAHMAN) || (ir->epc == epcMTTK))
 +      {
 +          state->flags |= (1<<estBOXV);
 +      }
 +      if (ir->epc != epcNO)
 +      {
 +          if (IR_NPT_TROTTER(ir) || (IR_NPH_TROTTER(ir)))
 +          {
 +              state->nnhpres = 1;
 +              state->flags |= (1<<estNHPRES_XI);
 +              state->flags |= (1<<estNHPRES_VXI);
 +              state->flags |= (1<<estSVIR_PREV);
 +              state->flags |= (1<<estFVIR_PREV);
 +              state->flags |= (1<<estVETA);
 +              state->flags |= (1<<estVOL0);
 +          }
 +          else
 +          {
 +              state->flags |= (1<<estPRES_PREV);
 +          }
 +      }
 +  }
 +
 +  if (ir->etc == etcNOSEHOOVER) {
 +    state->flags |= (1<<estNH_XI);
 +    state->flags |= (1<<estNH_VXI);
 +  }
 +
 +  if (ir->etc == etcVRESCALE) {
 +    state->flags |= (1<<estTC_INT);
 +  }
 +
 +  init_gtc_state(state,state->ngtc,state->nnhpres,ir->opts.nhchainlength); /* allocate the space for nose-hoover chains */
 +  init_ekinstate(&state->ekinstate,ir);
 +
 +  init_energyhistory(&state->enerhist);
 +  init_df_history(&state->dfhist,ir->fepvals->n_lambda,ir->expandedvals->init_wl_delta);
 +}
 +
 +
 +void init_parallel(FILE *log, t_commrec *cr, t_inputrec *inputrec,
 +                   gmx_mtop_t *mtop)
 +{
 +    bcast_ir_mtop(cr,inputrec,mtop);
 +
++    if (inputrec->eI == eiBD || EI_SD(inputrec->eI) || ETC_ANDERSEN(inputrec->etc)) {
 +        /* Make sure the random seeds are different on each node */
 +        inputrec->ld_seed += cr->nodeid;
 +    }
 +}
 +
 +
index f6268d41a9c93c5d53cd4364e141095a2f745548,0000000000000000000000000000000000000000..4db41010c3c56e8865864f959c0a8e261597e256
mode 100644,000000..100644
--- /dev/null
@@@ -1,1310 -1,0 +1,1341 @@@
- /* Default nbnxn allocation routine, allocates 32 byte aligned,
-  * which works for plain C and aligned SSE and AVX loads/stores.
-  */
 +/* -*- mode: c; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4; c-file-style: "stroustrup"; -*-
 + *
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + *          GROningen MAchine for Chemical Simulations
 + *
 + * Written by David van der Spoel, Erik Lindahl, Berk Hess, and others.
 + * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 + * Copyright (c) 2001-2012, The GROMACS development team,
 + * check out http://www.gromacs.org for more information.
 +
 + * This program is free software; you can redistribute it and/or
 + * modify it under the terms of the GNU General Public License
 + * as published by the Free Software Foundation; either version 2
 + * of the License, or (at your option) any later version.
 + *
 + * If you want to redistribute modifications, please consider that
 + * scientific software is very special. Version control is crucial -
 + * bugs must be traceable. We will be happy to consider code for
 + * inclusion in the official distribution, but derived work must not
 + * be called official GROMACS. Details are found in the README & COPYING
 + * files - if they are missing, get the official version at www.gromacs.org.
 + *
 + * To help us fund GROMACS development, we humbly ask that you cite
 + * the papers on the package - you can find them in the top README file.
 + *
 + * For more info, check our website at http://www.gromacs.org
 + */
 +
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +#include <string.h>
 +#include "smalloc.h"
 +#include "macros.h"
 +#include "vec.h"
 +#include "nbnxn_consts.h"
 +#include "nbnxn_internal.h"
 +#include "nbnxn_search.h"
 +#include "nbnxn_atomdata.h"
 +#include "gmx_omp_nthreads.h"
 +
-     *ptr = save_malloc_aligned("ptr",__FILE__,__LINE__,nbytes,1,32);
++/* Default nbnxn allocation routine, allocates NBNXN_MEM_ALIGN byte aligned */
 +void nbnxn_alloc_aligned(void **ptr,size_t nbytes)
 +{
- nbnxn_atomdata_reduce_reals_x86_simd(real * gmx_restrict dest,
-                                      gmx_bool bDestSet,
-                                      real ** gmx_restrict src,
-                                      int nsrc,
-                                      int i0, int i1)
++    *ptr = save_malloc_aligned("ptr",__FILE__,__LINE__,nbytes,1,NBNXN_MEM_ALIGN);
 +}
 +
 +/* Free function for memory allocated with nbnxn_alloc_aligned */
 +void nbnxn_free_aligned(void *ptr)
 +{
 +    sfree_aligned(ptr);
 +}
 +
 +/* Reallocation wrapper function for nbnxn data structures */
 +void nbnxn_realloc_void(void **ptr,
 +                        int nbytes_copy,int nbytes_new,
 +                        nbnxn_alloc_t *ma,
 +                        nbnxn_free_t  *mf)
 +{
 +    void *ptr_new;
 +
 +    ma(&ptr_new,nbytes_new);
 +
 +    if (nbytes_new > 0 && ptr_new == NULL)
 +    {
 +        gmx_fatal(FARGS, "Allocation of %d bytes failed", nbytes_new);
 +    }
 +
 +    if (nbytes_copy > 0)
 +    {
 +        if (nbytes_new < nbytes_copy)
 +        {
 +            gmx_incons("In nbnxn_realloc_void: new size less than copy size");
 +        }
 +        memcpy(ptr_new,*ptr,nbytes_copy);
 +    }
 +    if (*ptr != NULL)
 +    {
 +        mf(*ptr);
 +    }
 +    *ptr = ptr_new;
 +}
 +
 +/* Reallocate the nbnxn_atomdata_t for a size of n atoms */
 +void nbnxn_atomdata_realloc(nbnxn_atomdata_t *nbat,int n)
 +{
 +    int t;
 +
 +    nbnxn_realloc_void((void **)&nbat->type,
 +                       nbat->natoms*sizeof(*nbat->type),
 +                       n*sizeof(*nbat->type),
 +                       nbat->alloc,nbat->free);
 +    nbnxn_realloc_void((void **)&nbat->lj_comb,
 +                       nbat->natoms*2*sizeof(*nbat->lj_comb),
 +                       n*2*sizeof(*nbat->lj_comb),
 +                       nbat->alloc,nbat->free);
 +    if (nbat->XFormat != nbatXYZQ)
 +    {
 +        nbnxn_realloc_void((void **)&nbat->q,
 +                           nbat->natoms*sizeof(*nbat->q),
 +                           n*sizeof(*nbat->q),
 +                           nbat->alloc,nbat->free);
 +    }
 +    if (nbat->nenergrp > 1)
 +    {
 +        nbnxn_realloc_void((void **)&nbat->energrp,
 +                           nbat->natoms/nbat->na_c*sizeof(*nbat->energrp),
 +                           n/nbat->na_c*sizeof(*nbat->energrp),
 +                           nbat->alloc,nbat->free);
 +    }
 +    nbnxn_realloc_void((void **)&nbat->x,
 +                       nbat->natoms*nbat->xstride*sizeof(*nbat->x),
 +                       n*nbat->xstride*sizeof(*nbat->x),
 +                       nbat->alloc,nbat->free);
 +    for(t=0; t<nbat->nout; t++)
 +    {
 +        /* Allocate one element extra for possible signaling with CUDA */
 +        nbnxn_realloc_void((void **)&nbat->out[t].f,
 +                           nbat->natoms*nbat->fstride*sizeof(*nbat->out[t].f),
 +                           n*nbat->fstride*sizeof(*nbat->out[t].f),
 +                           nbat->alloc,nbat->free);
 +    }
 +    nbat->nalloc = n;
 +}
 +
 +/* Initializes an nbnxn_atomdata_output_t data structure */
 +static void nbnxn_atomdata_output_init(nbnxn_atomdata_output_t *out,
 +                                       int nb_kernel_type,
 +                                       int nenergrp,int stride,
 +                                       nbnxn_alloc_t *ma)
 +{
 +    int cj_size;
 +
 +    out->f = NULL;
 +    ma((void **)&out->fshift,SHIFTS*DIM*sizeof(*out->fshift));
 +    out->nV = nenergrp*nenergrp;
 +    ma((void **)&out->Vvdw,out->nV*sizeof(*out->Vvdw));
 +    ma((void **)&out->Vc  ,out->nV*sizeof(*out->Vc  ));
 +
 +    if (nb_kernel_type == nbnxnk4xN_SIMD_4xN ||
 +        nb_kernel_type == nbnxnk4xN_SIMD_2xNN)
 +    {
 +        cj_size = nbnxn_kernel_to_cj_size(nb_kernel_type);
 +        out->nVS = nenergrp*nenergrp*stride*(cj_size>>1)*cj_size;
 +        ma((void **)&out->VSvdw,out->nVS*sizeof(*out->VSvdw));
 +        ma((void **)&out->VSc  ,out->nVS*sizeof(*out->VSc  ));
 +    }
 +    else
 +    {
 +        out->nVS = 0;
 +    }
 +}
 +
 +static void copy_int_to_nbat_int(const int *a,int na,int na_round,
 +                                 const int *in,int fill,int *innb)
 +{
 +    int i,j;
 +
 +    j = 0;
 +    for(i=0; i<na; i++)
 +    {
 +        innb[j++] = in[a[i]];
 +    }
 +    /* Complete the partially filled last cell with fill */
 +    for(; i<na_round; i++)
 +    {
 +        innb[j++] = fill;
 +    }
 +}
 +
 +static void clear_nbat_real(int na,int nbatFormat,real *xnb,int a0)
 +{
 +    int a,d,j,c;
 +
 +    switch (nbatFormat)
 +    {
 +    case nbatXYZ:
 +        for(a=0; a<na; a++)
 +        {
 +            for(d=0; d<DIM; d++)
 +            {
 +                xnb[(a0+a)*STRIDE_XYZ+d] = 0;
 +            }
 +        }
 +        break;
 +    case nbatXYZQ:
 +        for(a=0; a<na; a++)
 +        {
 +            for(d=0; d<DIM; d++)
 +            {
 +                xnb[(a0+a)*STRIDE_XYZQ+d] = 0;
 +            }
 +        }
 +        break;
 +    case nbatX4:
 +        j = X4_IND_A(a0);
 +        c = a0 & (PACK_X4-1);
 +        for(a=0; a<na; a++)
 +        {
 +            xnb[j+XX*PACK_X4] = 0;
 +            xnb[j+YY*PACK_X4] = 0;
 +            xnb[j+ZZ*PACK_X4] = 0;
 +            j++;
 +            c++;
 +            if (c == PACK_X4)
 +            {
 +                j += (DIM-1)*PACK_X4;
 +                c  = 0;
 +            }
 +        }
 +        break;
 +    case nbatX8:
 +        j = X8_IND_A(a0);
 +        c = a0 & (PACK_X8-1);
 +        for(a=0; a<na; a++)
 +        {
 +            xnb[j+XX*PACK_X8] = 0;
 +            xnb[j+YY*PACK_X8] = 0;
 +            xnb[j+ZZ*PACK_X8] = 0;
 +            j++;
 +            c++;
 +            if (c == PACK_X8)
 +            {
 +                j += (DIM-1)*PACK_X8;
 +                c  = 0;
 +            }
 +        }
 +        break;
 +    }
 +}
 +
 +void copy_rvec_to_nbat_real(const int *a,int na,int na_round,
 +                            rvec *x,int nbatFormat,real *xnb,int a0,
 +                            int cx,int cy,int cz)
 +{
 +    int i,j,c;
 +
 +/* We might need to place filler particles to fill up the cell to na_round.
 + * The coefficients (LJ and q) for such particles are zero.
 + * But we might still get NaN as 0*NaN when distances are too small.
 + * We hope that -107 nm is far away enough from to zero
 + * to avoid accidental short distances to particles shifted down for pbc.
 + */
 +#define NBAT_FAR_AWAY 107
 +
 +    switch (nbatFormat)
 +    {
 +    case nbatXYZ:
 +        j = a0*STRIDE_XYZ;
 +        for(i=0; i<na; i++)
 +        {
 +            xnb[j++] = x[a[i]][XX];
 +            xnb[j++] = x[a[i]][YY];
 +            xnb[j++] = x[a[i]][ZZ];
 +        }
 +        /* Complete the partially filled last cell with copies of the last element.
 +         * This simplifies the bounding box calculation and avoid
 +         * numerical issues with atoms that are coincidentally close.
 +         */
 +        for(; i<na_round; i++)
 +        {
 +            xnb[j++] = -NBAT_FAR_AWAY*(1 + cx);
 +            xnb[j++] = -NBAT_FAR_AWAY*(1 + cy);
 +            xnb[j++] = -NBAT_FAR_AWAY*(1 + cz + i);
 +        }
 +        break;
 +    case nbatXYZQ:
 +        j = a0*STRIDE_XYZQ;
 +        for(i=0; i<na; i++)
 +        {
 +            xnb[j++] = x[a[i]][XX];
 +            xnb[j++] = x[a[i]][YY];
 +            xnb[j++] = x[a[i]][ZZ];
 +            j++;
 +        }
 +        /* Complete the partially filled last cell with particles far apart */
 +        for(; i<na_round; i++)
 +        {
 +            xnb[j++] = -NBAT_FAR_AWAY*(1 + cx);
 +            xnb[j++] = -NBAT_FAR_AWAY*(1 + cy);
 +            xnb[j++] = -NBAT_FAR_AWAY*(1 + cz + i);
 +            j++;
 +        }
 +        break;
 +    case nbatX4:
 +        j = X4_IND_A(a0);
 +        c = a0 & (PACK_X4-1);
 +        for(i=0; i<na; i++)
 +        {
 +            xnb[j+XX*PACK_X4] = x[a[i]][XX];
 +            xnb[j+YY*PACK_X4] = x[a[i]][YY];
 +            xnb[j+ZZ*PACK_X4] = x[a[i]][ZZ];
 +            j++;
 +            c++;
 +            if (c == PACK_X4)
 +            {
 +                j += (DIM-1)*PACK_X4;
 +                c  = 0;
 +            }
 +        }
 +        /* Complete the partially filled last cell with particles far apart */
 +        for(; i<na_round; i++)
 +        {
 +            xnb[j+XX*PACK_X4] = -NBAT_FAR_AWAY*(1 + cx);
 +            xnb[j+YY*PACK_X4] = -NBAT_FAR_AWAY*(1 + cy);
 +            xnb[j+ZZ*PACK_X4] = -NBAT_FAR_AWAY*(1 + cz + i);
 +            j++;
 +            c++;
 +            if (c == PACK_X4)
 +            {
 +                j += (DIM-1)*PACK_X4;
 +                c  = 0;
 +            }
 +        }
 +        break;
 +    case nbatX8:
 +        j = X8_IND_A(a0);
 +        c = a0 & (PACK_X8 - 1);
 +        for(i=0; i<na; i++)
 +        {
 +            xnb[j+XX*PACK_X8] = x[a[i]][XX];
 +            xnb[j+YY*PACK_X8] = x[a[i]][YY];
 +            xnb[j+ZZ*PACK_X8] = x[a[i]][ZZ];
 +            j++;
 +            c++;
 +            if (c == PACK_X8)
 +            {
 +                j += (DIM-1)*PACK_X8;
 +                c  = 0;
 +            }
 +        }
 +        /* Complete the partially filled last cell with particles far apart */
 +        for(; i<na_round; i++)
 +        {
 +            xnb[j+XX*PACK_X8] = -NBAT_FAR_AWAY*(1 + cx);
 +            xnb[j+YY*PACK_X8] = -NBAT_FAR_AWAY*(1 + cy);
 +            xnb[j+ZZ*PACK_X8] = -NBAT_FAR_AWAY*(1 + cz + i);
 +            j++;
 +            c++;
 +            if (c == PACK_X8)
 +            {
 +                j += (DIM-1)*PACK_X8;
 +                c  = 0;
 +            }
 +        }
 +        break;
 +    default:
 +        gmx_incons("Unsupported stride");
 +    }
 +}
 +
 +/* Determines the combination rule (or none) to be used, stores it,
 + * and sets the LJ parameters required with the rule.
 + */
 +static void set_combination_rule_data(nbnxn_atomdata_t *nbat)
 +{
 +    int  nt,i,j;
 +    real c6,c12;
 +
 +    nt = nbat->ntype;
 +
 +    switch (nbat->comb_rule)
 +    {
 +    case  ljcrGEOM:
 +        nbat->comb_rule = ljcrGEOM;
 +
 +        for(i=0; i<nt; i++)
 +        {
 +            /* Copy the diagonal from the nbfp matrix */
 +            nbat->nbfp_comb[i*2  ] = sqrt(nbat->nbfp[(i*nt+i)*2  ]);
 +            nbat->nbfp_comb[i*2+1] = sqrt(nbat->nbfp[(i*nt+i)*2+1]);
 +        }
 +        break;
 +    case ljcrLB:
 +        for(i=0; i<nt; i++)
 +        {
 +            /* Get 6*C6 and 12*C12 from the diagonal of the nbfp matrix */
 +            c6  = nbat->nbfp[(i*nt+i)*2  ];
 +            c12 = nbat->nbfp[(i*nt+i)*2+1];
 +            if (c6 > 0 && c12 > 0)
 +            {
 +                /* We store 0.5*2^1/6*sigma and sqrt(4*3*eps),
 +                 * so we get 6*C6 and 12*C12 after combining.
 +                 */
 +                nbat->nbfp_comb[i*2  ] = 0.5*pow(c12/c6,1.0/6.0);
 +                nbat->nbfp_comb[i*2+1] = sqrt(c6*c6/c12);
 +            }
 +            else
 +            {
 +                nbat->nbfp_comb[i*2  ] = 0;
 +                nbat->nbfp_comb[i*2+1] = 0;
 +            }
 +        }
 +        break;
 +    case ljcrNONE:
 +        /* In nbfp_s4 we use a stride of 4 for storing two parameters */
 +        nbat->alloc((void **)&nbat->nbfp_s4,nt*nt*4*sizeof(*nbat->nbfp_s4));
 +        for(i=0; i<nt; i++)
 +        {
 +            for(j=0; j<nt; j++)
 +            {
 +                nbat->nbfp_s4[(i*nt+j)*4+0] = nbat->nbfp[(i*nt+j)*2+0];
 +                nbat->nbfp_s4[(i*nt+j)*4+1] = nbat->nbfp[(i*nt+j)*2+1];
 +                nbat->nbfp_s4[(i*nt+j)*4+2] = 0;
 +                nbat->nbfp_s4[(i*nt+j)*4+3] = 0;
 +            }
 +        }
 +        break;
 +    default:
 +        gmx_incons("Unknown combination rule");
 +        break;
 +    }
 +}
 +
 +/* Initializes an nbnxn_atomdata_t data structure */
 +void nbnxn_atomdata_init(FILE *fp,
 +                         nbnxn_atomdata_t *nbat,
 +                         int nb_kernel_type,
 +                         int ntype,const real *nbfp,
 +                         int n_energygroups,
 +                         int nout,
 +                         nbnxn_alloc_t *alloc,
 +                         nbnxn_free_t  *free)
 +{
 +    int  i,j;
 +    real c6,c12,tol;
 +    char *ptr;
 +    gmx_bool simple,bCombGeom,bCombLB;
 +
 +    if (alloc == NULL)
 +    {
 +        nbat->alloc = nbnxn_alloc_aligned;
 +    }
 +    else
 +    {
 +        nbat->alloc = alloc;
 +    }
 +    if (free == NULL)
 +    {
 +        nbat->free = nbnxn_free_aligned;
 +    }
 +    else
 +    {
 +        nbat->free = free;
 +    }
 +
 +    if (debug)
 +    {
 +        fprintf(debug,"There are %d atom types in the system, adding one for nbnxn_atomdata_t\n",ntype);
 +    }
 +    nbat->ntype = ntype + 1;
 +    nbat->alloc((void **)&nbat->nbfp,
 +                nbat->ntype*nbat->ntype*2*sizeof(*nbat->nbfp));
 +    nbat->alloc((void **)&nbat->nbfp_comb,nbat->ntype*2*sizeof(*nbat->nbfp_comb));
 +
 +    /* A tolerance of 1e-5 seems reasonable for (possibly hand-typed)
 +     * force-field floating point parameters.
 +     */
 +    tol = 1e-5;
 +    ptr = getenv("GMX_LJCOMB_TOL");
 +    if (ptr != NULL)
 +    {
 +        double dbl;
 +
 +        sscanf(ptr,"%lf",&dbl);
 +        tol = dbl;
 +    }
 +    bCombGeom = TRUE;
 +    bCombLB   = TRUE;
 +
 +    /* Temporarily fill nbat->nbfp_comb with sigma and epsilon
 +     * to check for the LB rule.
 +     */
 +    for(i=0; i<ntype; i++)
 +    {
 +        c6  = nbfp[(i*ntype+i)*2  ]/6.0;
 +        c12 = nbfp[(i*ntype+i)*2+1]/12.0;
 +        if (c6 > 0 && c12 > 0)
 +        {
 +            nbat->nbfp_comb[i*2  ] = pow(c12/c6,1.0/6.0);
 +            nbat->nbfp_comb[i*2+1] = 0.25*c6*c6/c12;
 +        }
 +        else if (c6 == 0 && c12 == 0)
 +        {
 +            nbat->nbfp_comb[i*2  ] = 0;
 +            nbat->nbfp_comb[i*2+1] = 0;
 +        }
 +        else
 +        {
 +            /* Can not use LB rule with only dispersion or repulsion */
 +            bCombLB = FALSE;
 +        }
 +    }
 +
 +    for(i=0; i<nbat->ntype; i++)
 +    {
 +        for(j=0; j<nbat->ntype; j++)
 +        {
 +            if (i < ntype && j < ntype)
 +            {
 +                /* fr->nbfp has been updated, so that array too now stores c6/c12 including
 +                 * the 6.0/12.0 prefactors to save 2 flops in the most common case (force-only).
 +                 */
 +                c6  = nbfp[(i*ntype+j)*2  ];
 +                c12 = nbfp[(i*ntype+j)*2+1];
 +                nbat->nbfp[(i*nbat->ntype+j)*2  ] = c6;
 +                nbat->nbfp[(i*nbat->ntype+j)*2+1] = c12;
 +
 +                /* Compare 6*C6 and 12*C12 for geometric cobination rule */
 +                bCombGeom = bCombGeom &&
 +                    gmx_within_tol(c6*c6  ,nbfp[(i*ntype+i)*2  ]*nbfp[(j*ntype+j)*2  ],tol) &&
 +                    gmx_within_tol(c12*c12,nbfp[(i*ntype+i)*2+1]*nbfp[(j*ntype+j)*2+1],tol);
 +
 +                /* Compare C6 and C12 for Lorentz-Berthelot combination rule */
 +                c6  /= 6.0;
 +                c12 /= 12.0;
 +                bCombLB = bCombLB &&
 +                    ((c6 == 0 && c12 == 0 &&
 +                      (nbat->nbfp_comb[i*2+1] == 0 || nbat->nbfp_comb[j*2+1] == 0)) ||
 +                     (c6 > 0 && c12 > 0 &&
 +                      gmx_within_tol(pow(c12/c6,1.0/6.0),0.5*(nbat->nbfp_comb[i*2]+nbat->nbfp_comb[j*2]),tol) &&
 +                      gmx_within_tol(0.25*c6*c6/c12,sqrt(nbat->nbfp_comb[i*2+1]*nbat->nbfp_comb[j*2+1]),tol)));
 +            }
 +            else
 +            {
 +                /* Add zero parameters for the additional dummy atom type */
 +                nbat->nbfp[(i*nbat->ntype+j)*2  ] = 0;
 +                nbat->nbfp[(i*nbat->ntype+j)*2+1] = 0;
 +            }
 +        }
 +    }
 +    if (debug)
 +    {
 +        fprintf(debug,"Combination rules: geometric %d Lorentz-Berthelot %d\n",
 +                bCombGeom,bCombLB);
 +    }
 +
 +    simple = nbnxn_kernel_pairlist_simple(nb_kernel_type);
 +
 +    if (simple)
 +    {
 +        /* We prefer the geometic combination rule,
 +         * as that gives a slightly faster kernel than the LB rule.
 +         */
 +        if (bCombGeom)
 +        {
 +            nbat->comb_rule = ljcrGEOM;
 +        }
 +        else if (bCombLB)
 +        {
 +            nbat->comb_rule = ljcrLB;
 +        }
 +        else
 +        {
 +            nbat->comb_rule = ljcrNONE;
 +
 +            nbat->free(nbat->nbfp_comb);
 +        }
 +
 +        if (fp)
 +        {
 +            if (nbat->comb_rule == ljcrNONE)
 +            {
 +                fprintf(fp,"Using full Lennard-Jones parameter combination matrix\n\n");
 +            }
 +            else
 +            {
 +                fprintf(fp,"Using %s Lennard-Jones combination rule\n\n",
 +                        nbat->comb_rule==ljcrGEOM ? "geometric" : "Lorentz-Berthelot");
 +            }
 +        }
 +
 +        set_combination_rule_data(nbat);
 +    }
 +    else
 +    {
 +        nbat->comb_rule = ljcrNONE;
 +
 +        nbat->free(nbat->nbfp_comb);
 +    }
 +
 +    nbat->natoms  = 0;
 +    nbat->type    = NULL;
 +    nbat->lj_comb = NULL;
 +    if (simple)
 +    {
 +        int pack_x;
 +
 +        switch (nb_kernel_type)
 +        {
 +        case nbnxnk4xN_SIMD_4xN:
 +        case nbnxnk4xN_SIMD_2xNN:
 +            pack_x = max(NBNXN_CPU_CLUSTER_I_SIZE,
 +                         nbnxn_kernel_to_cj_size(nb_kernel_type));
 +            switch (pack_x)
 +            {
 +                case 4:
 +                    nbat->XFormat = nbatX4;
 +                    break;
 +                case 8:
 +                    nbat->XFormat = nbatX8;
 +                    break;
 +                default:
 +                    gmx_incons("Unsupported packing width");
 +            }
 +            break;
 +        default:
 +            nbat->XFormat = nbatXYZ;
 +            break;
 +        }
 +
 +        nbat->FFormat = nbat->XFormat;
 +    }
 +    else
 +    {
 +        nbat->XFormat = nbatXYZQ;
 +        nbat->FFormat = nbatXYZ;
 +    }
 +    nbat->q       = NULL;
 +    nbat->nenergrp = n_energygroups;
 +    if (!simple)
 +    {
 +        /* Energy groups not supported yet for super-sub lists */
 +        if (n_energygroups > 1 && fp != NULL)
 +        {
 +            fprintf(fp,"\nNOTE: With GPUs, reporting energy group contributions is not supported\n\n");
 +        }
 +        nbat->nenergrp = 1;
 +    }
 +    /* Temporary storage goes as #grp^3*simd_width^2/2, so limit to 64 */
 +    if (nbat->nenergrp > 64)
 +    {
 +        gmx_fatal(FARGS,"With NxN kernels not more than 64 energy groups are supported\n");
 +    }
 +    nbat->neg_2log = 1;
 +    while (nbat->nenergrp > (1<<nbat->neg_2log))
 +    {
 +        nbat->neg_2log++;
 +    }
 +    nbat->energrp = NULL;
 +    nbat->alloc((void **)&nbat->shift_vec,SHIFTS*sizeof(*nbat->shift_vec));
 +    nbat->xstride = (nbat->XFormat == nbatXYZQ ? STRIDE_XYZQ : DIM);
 +    nbat->fstride = (nbat->FFormat == nbatXYZQ ? STRIDE_XYZQ : DIM);
 +    nbat->x       = NULL;
++
++#ifdef GMX_NBNXN_SIMD
++    if (simple)
++    {
++        /* Set the diagonal cluster pair exclusion mask setup data.
++         * In the kernel we check 0 < j - i to generate the masks.
++         * Here we store j - i for generating the mask for the first i,
++         * we substract 0.5 to avoid rounding issues.
++         * In the kernel we can subtract 1 to generate the subsequent mask.
++         */
++        const int simd_width=GMX_NBNXN_SIMD_BITWIDTH/(sizeof(real)*8);
++        int simd_4xn_diag_size,j;
++
++        simd_4xn_diag_size = max(NBNXN_CPU_CLUSTER_I_SIZE,simd_width);
++        snew_aligned(nbat->simd_4xn_diag,simd_4xn_diag_size,NBNXN_MEM_ALIGN);
++        for(j=0; j<simd_4xn_diag_size; j++)
++        {
++            nbat->simd_4xn_diag[j] = j - 0.5;
++        }
++
++        snew_aligned(nbat->simd_2xnn_diag,simd_width,NBNXN_MEM_ALIGN);
++        for(j=0; j<simd_width/2; j++)
++        {
++            /* The j-cluster size is half the SIMD width */
++            nbat->simd_2xnn_diag[j]              = j - 0.5;
++            /* The next half of the SIMD width is for i + 1 */
++            nbat->simd_2xnn_diag[simd_width/2+j] = j - 1 - 0.5;
++        }
++    }
++#endif
++
++    /* Initialize the output data structures */
 +    nbat->nout    = nout;
 +    snew(nbat->out,nbat->nout);
 +    nbat->nalloc  = 0;
 +    for(i=0; i<nbat->nout; i++)
 +    {
 +        nbnxn_atomdata_output_init(&nbat->out[i],
 +                                   nb_kernel_type,
 +                                   nbat->nenergrp,1<<nbat->neg_2log,
 +                                   nbat->alloc);
 +    }
 +    nbat->buffer_flags.flag        = NULL;
 +    nbat->buffer_flags.flag_nalloc = 0;
 +}
 +
 +static void copy_lj_to_nbat_lj_comb_x4(const real *ljparam_type,
 +                                       const int *type,int na,
 +                                       real *ljparam_at)
 +{
 +    int is,k,i;
 +
 +    /* The LJ params follow the combination rule:
 +     * copy the params for the type array to the atom array.
 +     */
 +    for(is=0; is<na; is+=PACK_X4)
 +    {
 +        for(k=0; k<PACK_X4; k++)
 +        {
 +            i = is + k;
 +            ljparam_at[is*2        +k] = ljparam_type[type[i]*2  ];
 +            ljparam_at[is*2+PACK_X4+k] = ljparam_type[type[i]*2+1];
 +        }
 +    }
 +}
 +
 +static void copy_lj_to_nbat_lj_comb_x8(const real *ljparam_type,
 +                                       const int *type,int na,
 +                                       real *ljparam_at)
 +{
 +    int is,k,i;
 +
 +    /* The LJ params follow the combination rule:
 +     * copy the params for the type array to the atom array.
 +     */
 +    for(is=0; is<na; is+=PACK_X8)
 +    {
 +        for(k=0; k<PACK_X8; k++)
 +        {
 +            i = is + k;
 +            ljparam_at[is*2        +k] = ljparam_type[type[i]*2  ];
 +            ljparam_at[is*2+PACK_X8+k] = ljparam_type[type[i]*2+1];
 +        }
 +    }
 +}
 +
 +/* Sets the atom type and LJ data in nbnxn_atomdata_t */
 +static void nbnxn_atomdata_set_atomtypes(nbnxn_atomdata_t *nbat,
 +                                         int ngrid,
 +                                         const nbnxn_search_t nbs,
 +                                         const int *type)
 +{
 +    int g,i,ncz,ash;
 +    const nbnxn_grid_t *grid;
 +
 +    for(g=0; g<ngrid; g++)
 +    {
 +        grid = &nbs->grid[g];
 +
 +        /* Loop over all columns and copy and fill */
 +        for(i=0; i<grid->ncx*grid->ncy; i++)
 +        {
 +            ncz = grid->cxy_ind[i+1] - grid->cxy_ind[i];
 +            ash = (grid->cell0 + grid->cxy_ind[i])*grid->na_sc;
 +
 +            copy_int_to_nbat_int(nbs->a+ash,grid->cxy_na[i],ncz*grid->na_sc,
 +                                 type,nbat->ntype-1,nbat->type+ash);
 +
 +            if (nbat->comb_rule != ljcrNONE)
 +            {
 +                if (nbat->XFormat == nbatX4)
 +                {
 +                    copy_lj_to_nbat_lj_comb_x4(nbat->nbfp_comb,
 +                                               nbat->type+ash,ncz*grid->na_sc,
 +                                               nbat->lj_comb+ash*2);
 +                }
 +                else if (nbat->XFormat == nbatX8)
 +                {
 +                    copy_lj_to_nbat_lj_comb_x8(nbat->nbfp_comb,
 +                                               nbat->type+ash,ncz*grid->na_sc,
 +                                               nbat->lj_comb+ash*2);
 +                }
 +            }
 +        }
 +    }
 +}
 +
 +/* Sets the charges in nbnxn_atomdata_t *nbat */
 +static void nbnxn_atomdata_set_charges(nbnxn_atomdata_t *nbat,
 +                                       int ngrid,
 +                                       const nbnxn_search_t nbs,
 +                                       const real *charge)
 +{
 +    int  g,cxy,ncz,ash,na,na_round,i,j;
 +    real *q;
 +    const nbnxn_grid_t *grid;
 +
 +    for(g=0; g<ngrid; g++)
 +    {
 +        grid = &nbs->grid[g];
 +
 +        /* Loop over all columns and copy and fill */
 +        for(cxy=0; cxy<grid->ncx*grid->ncy; cxy++)
 +        {
 +            ash = (grid->cell0 + grid->cxy_ind[cxy])*grid->na_sc;
 +            na  = grid->cxy_na[cxy];
 +            na_round = (grid->cxy_ind[cxy+1] - grid->cxy_ind[cxy])*grid->na_sc;
 +
 +            if (nbat->XFormat == nbatXYZQ)
 +            {
 +                q = nbat->x + ash*STRIDE_XYZQ + ZZ + 1;
 +                for(i=0; i<na; i++)
 +                {
 +                    *q = charge[nbs->a[ash+i]];
 +                    q += STRIDE_XYZQ;
 +                }
 +                /* Complete the partially filled last cell with zeros */
 +                for(; i<na_round; i++)
 +                {
 +                    *q = 0;
 +                    q += STRIDE_XYZQ;
 +                }
 +            }
 +            else
 +            {
 +                q = nbat->q + ash;
 +                for(i=0; i<na; i++)
 +                {
 +                    *q = charge[nbs->a[ash+i]];
 +                    q++;
 +                }
 +                /* Complete the partially filled last cell with zeros */
 +                for(; i<na_round; i++)
 +                {
 +                    *q = 0;
 +                    q++;
 +                }
 +            }
 +        }
 +    }
 +}
 +
 +/* Copies the energy group indices to a reordered and packed array */
 +static void copy_egp_to_nbat_egps(const int *a,int na,int na_round,
 +                                  int na_c,int bit_shift,
 +                                  const int *in,int *innb)
 +{
 +    int i,j,sa,at;
 +    int comb;
 +
 +    j = 0;
 +    for(i=0; i<na; i+=na_c)
 +    {
 +        /* Store na_c energy group numbers into one int */
 +        comb = 0;
 +        for(sa=0; sa<na_c; sa++)
 +        {
 +            at = a[i+sa];
 +            if (at >= 0)
 +            {
 +                comb |= (GET_CGINFO_GID(in[at]) << (sa*bit_shift));
 +            }
 +        }
 +        innb[j++] = comb;
 +    }
 +    /* Complete the partially filled last cell with fill */
 +    for(; i<na_round; i+=na_c)
 +    {
 +        innb[j++] = 0;
 +    }
 +}
 +
 +/* Set the energy group indices for atoms in nbnxn_atomdata_t */
 +static void nbnxn_atomdata_set_energygroups(nbnxn_atomdata_t *nbat,
 +                                            int ngrid,
 +                                            const nbnxn_search_t nbs,
 +                                            const int *atinfo)
 +{
 +    int g,i,ncz,ash;
 +    const nbnxn_grid_t *grid;
 +
 +    for(g=0; g<ngrid; g++)
 +    {
 +        grid = &nbs->grid[g];
 +
 +        /* Loop over all columns and copy and fill */
 +        for(i=0; i<grid->ncx*grid->ncy; i++)
 +        {
 +            ncz = grid->cxy_ind[i+1] - grid->cxy_ind[i];
 +            ash = (grid->cell0 + grid->cxy_ind[i])*grid->na_sc;
 +
 +            copy_egp_to_nbat_egps(nbs->a+ash,grid->cxy_na[i],ncz*grid->na_sc,
 +                                  nbat->na_c,nbat->neg_2log,
 +                                  atinfo,nbat->energrp+(ash>>grid->na_c_2log));
 +        }
 +    }
 +}
 +
 +/* Sets all required atom parameter data in nbnxn_atomdata_t */
 +void nbnxn_atomdata_set(nbnxn_atomdata_t *nbat,
 +                        int locality,
 +                        const nbnxn_search_t nbs,
 +                        const t_mdatoms *mdatoms,
 +                        const int *atinfo)
 +{
 +    int ngrid;
 +
 +    if (locality == eatLocal)
 +    {
 +        ngrid = 1;
 +    }
 +    else
 +    {
 +        ngrid = nbs->ngrid;
 +    }
 +
 +    nbnxn_atomdata_set_atomtypes(nbat,ngrid,nbs,mdatoms->typeA);
 +
 +    nbnxn_atomdata_set_charges(nbat,ngrid,nbs,mdatoms->chargeA);
 +
 +    if (nbat->nenergrp > 1)
 +    {
 +        nbnxn_atomdata_set_energygroups(nbat,ngrid,nbs,atinfo);
 +    }
 +}
 +
 +/* Copies the shift vector array to nbnxn_atomdata_t */
 +void nbnxn_atomdata_copy_shiftvec(gmx_bool bDynamicBox,
 +                                   rvec *shift_vec,
 +                                   nbnxn_atomdata_t *nbat)
 +{
 +    int i;
 +
 +    nbat->bDynamicBox = bDynamicBox;
 +    for(i=0; i<SHIFTS; i++)
 +    {
 +        copy_rvec(shift_vec[i],nbat->shift_vec[i]);
 +    }
 +}
 +
 +/* Copies (and reorders) the coordinates to nbnxn_atomdata_t */
 +void nbnxn_atomdata_copy_x_to_nbat_x(const nbnxn_search_t nbs,
 +                                      int locality,
 +                                      gmx_bool FillLocal,
 +                                      rvec *x,
 +                                      nbnxn_atomdata_t *nbat)
 +{
 +    int g0=0,g1=0;
 +    int nth,th;
 +
 +    switch (locality)
 +    {
 +    case eatAll:
 +        g0 = 0;
 +        g1 = nbs->ngrid;
 +        break;
 +    case eatLocal:
 +        g0 = 0;
 +        g1 = 1;
 +        break;
 +    case eatNonlocal:
 +        g0 = 1;
 +        g1 = nbs->ngrid;
 +        break;
 +    }
 +
 +    if (FillLocal)
 +    {
 +        nbat->natoms_local = nbs->grid[0].nc*nbs->grid[0].na_sc;
 +    }
 +
 +    nth = gmx_omp_nthreads_get(emntPairsearch);
 +
 +#pragma omp parallel for num_threads(nth) schedule(static)
 +    for(th=0; th<nth; th++)
 +    {
 +        int g;
 +
 +        for(g=g0; g<g1; g++)
 +        {
 +            const nbnxn_grid_t *grid;
 +            int cxy0,cxy1,cxy;
 +
 +            grid = &nbs->grid[g];
 +
 +            cxy0 = (grid->ncx*grid->ncy* th   +nth-1)/nth;
 +            cxy1 = (grid->ncx*grid->ncy*(th+1)+nth-1)/nth;
 +
 +            for(cxy=cxy0; cxy<cxy1; cxy++)
 +            {
 +                int na,ash,na_fill;
 +
 +                na  = grid->cxy_na[cxy];
 +                ash = (grid->cell0 + grid->cxy_ind[cxy])*grid->na_sc;
 +
 +                if (g == 0 && FillLocal)
 +                {
 +                    na_fill =
 +                        (grid->cxy_ind[cxy+1] - grid->cxy_ind[cxy])*grid->na_sc;
 +                }
 +                else
 +                {
 +                    /* We fill only the real particle locations.
 +                     * We assume the filling entries at the end have been
 +                     * properly set beforehand during neighbor searching (ns).
 +                     */
 +                    na_fill = na;
 +                }
 +                copy_rvec_to_nbat_real(nbs->a+ash,na,na_fill,x,
 +                                       nbat->XFormat,nbat->x,ash,
 +                                       0,0,0);
 +            }
 +        }
 +    }
 +}
 +
 +static void
 +nbnxn_atomdata_clear_reals(real * gmx_restrict dest,
 +                           int i0, int i1)
 +{
 +    int i;
 +
 +    for(i=i0; i<i1; i++)
 +    {
 +        dest[i] = 0;
 +    }
 +}
 +
 +static void
 +nbnxn_atomdata_reduce_reals(real * gmx_restrict dest,
 +                            gmx_bool bDestSet,
 +                            real ** gmx_restrict src,
 +                            int nsrc,
 +                            int i0, int i1)
 +{
 +    int i,s;
 +
 +    if (bDestSet)
 +    {
 +        /* The destination buffer contains data, add to it */
 +        for(i=i0; i<i1; i++)
 +        {
 +            for(s=0; s<nsrc; s++)
 +            {
 +                dest[i] += src[s][i];
 +            }
 +        }
 +    }
 +    else
 +    {
 +        /* The destination buffer is uninitialized, set it first */
 +        for(i=i0; i<i1; i++)
 +        {
 +            dest[i] = src[0][i];
 +            for(s=1; s<nsrc; s++)
 +            {
 +                dest[i] += src[s][i];
 +            }
 +        }
 +    }
 +}
 +
 +static void
- #ifdef NBNXN_SEARCH_SSE
- /* We can use AVX256 here, but not when AVX128 kernels are selected.
-  * As this reduction is not faster with AVX256 anyway, we use 128-bit SIMD.
++nbnxn_atomdata_reduce_reals_simd(real * gmx_restrict dest,
++                                 gmx_bool bDestSet,
++                                 real ** gmx_restrict src,
++                                 int nsrc,
++                                 int i0, int i1)
 +{
- #ifdef GMX_X86_AVX_256
- #define GMX_MM256_HERE
- #else
++#ifdef GMX_NBNXN_SIMD
++/* The SIMD width here is actually independent of that in the kernels,
++ * but we use the same width for simplicity (usually optimal anyhow).
 + */
- #ifdef NBNXN_SEARCH_SSE
-                     nbnxn_atomdata_reduce_reals_x86_simd
++#if GMX_NBNXN_SIMD_BITWIDTH == 128
 +#define GMX_MM128_HERE
 +#endif
++#if GMX_NBNXN_SIMD_BITWIDTH == 256
++#define GMX_MM256_HERE
++#endif
 +#include "gmx_simd_macros.h"
 +
 +    int       i,s;
 +    gmx_mm_pr dest_SSE,src_SSE;
 +
 +    if (bDestSet)
 +    {
 +        for(i=i0; i<i1; i+=GMX_SIMD_WIDTH_HERE)
 +        {
 +            dest_SSE = gmx_load_pr(dest+i);
 +            for(s=0; s<nsrc; s++)
 +            {
 +                src_SSE  = gmx_load_pr(src[s]+i);
 +                dest_SSE = gmx_add_pr(dest_SSE,src_SSE);
 +            }
 +            gmx_store_pr(dest+i,dest_SSE);
 +        }
 +    }
 +    else
 +    {
 +        for(i=i0; i<i1; i+=GMX_SIMD_WIDTH_HERE)
 +        {
 +            dest_SSE = gmx_load_pr(src[0]+i);
 +            for(s=1; s<nsrc; s++)
 +            {
 +                src_SSE  = gmx_load_pr(src[s]+i);
 +                dest_SSE = gmx_add_pr(dest_SSE,src_SSE);
 +            }
 +            gmx_store_pr(dest+i,dest_SSE);
 +        }
 +    }
 +
 +#undef GMX_MM128_HERE
 +#undef GMX_MM256_HERE
 +#endif
 +}
 +
 +/* Add part of the force array(s) from nbnxn_atomdata_t to f */
 +static void
 +nbnxn_atomdata_add_nbat_f_to_f_part(const nbnxn_search_t nbs,
 +                                    const nbnxn_atomdata_t *nbat,
 +                                    nbnxn_atomdata_output_t *out,
 +                                    int nfa,
 +                                    int a0,int a1,
 +                                    rvec *f)
 +{
 +    int  a,i,fa;
 +    const int  *cell;
 +    const real *fnb;
 +
 +    cell = nbs->cell;
 +
 +    /* Loop over all columns and copy and fill */
 +    switch (nbat->FFormat)
 +    {
 +    case nbatXYZ:
 +    case nbatXYZQ:
 +        if (nfa == 1)
 +        {
 +            fnb = out[0].f;
 +
 +            for(a=a0; a<a1; a++)
 +            {
 +                i = cell[a]*nbat->fstride;
 +
 +                f[a][XX] += fnb[i];
 +                f[a][YY] += fnb[i+1];
 +                f[a][ZZ] += fnb[i+2];
 +            }
 +        }
 +        else
 +        {
 +            for(a=a0; a<a1; a++)
 +            {
 +                i = cell[a]*nbat->fstride;
 +
 +                for(fa=0; fa<nfa; fa++)
 +                {
 +                    f[a][XX] += out[fa].f[i];
 +                    f[a][YY] += out[fa].f[i+1];
 +                    f[a][ZZ] += out[fa].f[i+2];
 +                }
 +            }
 +        }
 +        break;
 +    case nbatX4:
 +        if (nfa == 1)
 +        {
 +            fnb = out[0].f;
 +
 +            for(a=a0; a<a1; a++)
 +            {
 +                i = X4_IND_A(cell[a]);
 +
 +                f[a][XX] += fnb[i+XX*PACK_X4];
 +                f[a][YY] += fnb[i+YY*PACK_X4];
 +                f[a][ZZ] += fnb[i+ZZ*PACK_X4];
 +            }
 +        }
 +        else
 +        {
 +            for(a=a0; a<a1; a++)
 +            {
 +                i = X4_IND_A(cell[a]);
 +                
 +                for(fa=0; fa<nfa; fa++)
 +                {
 +                    f[a][XX] += out[fa].f[i+XX*PACK_X4];
 +                    f[a][YY] += out[fa].f[i+YY*PACK_X4];
 +                    f[a][ZZ] += out[fa].f[i+ZZ*PACK_X4];
 +                }
 +            }
 +        }
 +        break;
 +    case nbatX8:
 +        if (nfa == 1)
 +        {
 +            fnb = out[0].f;
 +
 +            for(a=a0; a<a1; a++)
 +            {
 +                i = X8_IND_A(cell[a]);
 +
 +                f[a][XX] += fnb[i+XX*PACK_X8];
 +                f[a][YY] += fnb[i+YY*PACK_X8];
 +                f[a][ZZ] += fnb[i+ZZ*PACK_X8];
 +            }
 +        }
 +        else
 +        {
 +            for(a=a0; a<a1; a++)
 +            {
 +                i = X8_IND_A(cell[a]);
 +                
 +                for(fa=0; fa<nfa; fa++)
 +                {
 +                    f[a][XX] += out[fa].f[i+XX*PACK_X8];
 +                    f[a][YY] += out[fa].f[i+YY*PACK_X8];
 +                    f[a][ZZ] += out[fa].f[i+ZZ*PACK_X8];
 +                }
 +            }
 +        }
 +        break;
 +    }
 +}
 +
 +/* Add the force array(s) from nbnxn_atomdata_t to f */
 +void nbnxn_atomdata_add_nbat_f_to_f(const nbnxn_search_t nbs,
 +                                    int locality,
 +                                    const nbnxn_atomdata_t *nbat,
 +                                    rvec *f)
 +{
 +    int a0=0,na=0;
 +    int nth,th;
 +
 +    nbs_cycle_start(&nbs->cc[enbsCCreducef]);
 +
 +    switch (locality)
 +    {
 +    case eatAll:
 +        a0 = 0;
 +        na = nbs->natoms_nonlocal;
 +        break;
 +    case eatLocal:
 +        a0 = 0;
 +        na = nbs->natoms_local;
 +        break;
 +    case eatNonlocal:
 +        a0 = nbs->natoms_local;
 +        na = nbs->natoms_nonlocal - nbs->natoms_local;
 +        break;
 +    }
 +
 +    nth = gmx_omp_nthreads_get(emntNonbonded);
 +
 +    if (nbat->nout > 1)
 +    {
 +        if (locality != eatAll)
 +        {
 +            gmx_incons("add_f_to_f called with nout>1 and locality!=eatAll");
 +        }
 +
 +        /* Reduce the force thread output buffers into buffer 0, before adding
 +         * them to the, differently ordered, "real" force buffer.
 +         */
 +#pragma omp parallel for num_threads(nth) schedule(static)
 +        for(th=0; th<nth; th++)
 +        {
 +            const nbnxn_buffer_flags_t *flags;
 +            int b0,b1,b;
 +            int i0,i1;
 +            int nfptr;
 +            real *fptr[NBNXN_BUFFERFLAG_MAX_THREADS];
 +            int out;
 +
 +            flags = &nbat->buffer_flags;
 +
 +            /* Calculate the cell-block range for our thread */
 +            b0 = (flags->nflag* th   )/nth;
 +            b1 = (flags->nflag*(th+1))/nth;
 +
 +            for(b=b0; b<b1; b++)
 +            {
 +                i0 =  b   *NBNXN_BUFFERFLAG_SIZE*nbat->fstride;
 +                i1 = (b+1)*NBNXN_BUFFERFLAG_SIZE*nbat->fstride;
 +
 +                nfptr = 0;
 +                for(out=1; out<nbat->nout; out++)
 +                {
 +                    if (flags->flag[b] & (1U<<out))
 +                    {
 +                        fptr[nfptr++] = nbat->out[out].f;
 +                    }
 +                }
 +                if (nfptr > 0)
 +                {
++#ifdef GMX_NBNXN_SIMD
++                    nbnxn_atomdata_reduce_reals_simd
 +#else
 +                    nbnxn_atomdata_reduce_reals
 +#endif
 +                                               (nbat->out[0].f,
 +                                                flags->flag[b] & (1U<<0),
 +                                                fptr,nfptr,
 +                                                i0,i1);
 +                }
 +                else if (!(flags->flag[b] & (1U<<0)))
 +                {
 +                    nbnxn_atomdata_clear_reals(nbat->out[0].f,
 +                                               i0,i1);
 +                }
 +            }
 +        }
 +    }
 +
 +#pragma omp parallel for num_threads(nth) schedule(static)
 +    for(th=0; th<nth; th++)
 +    {
 +        nbnxn_atomdata_add_nbat_f_to_f_part(nbs,nbat,
 +                                            nbat->out,
 +                                            1,
 +                                            a0+((th+0)*na)/nth,
 +                                            a0+((th+1)*na)/nth,
 +                                            f);
 +    }
 +
 +    nbs_cycle_stop(&nbs->cc[enbsCCreducef]);
 +}
 +
 +/* Adds the shift forces from nbnxn_atomdata_t to fshift */
 +void nbnxn_atomdata_add_nbat_fshift_to_fshift(const nbnxn_atomdata_t *nbat,
 +                                              rvec *fshift)
 +{
 +    const nbnxn_atomdata_output_t *out;
 +    int  th;
 +    int  s;
 +    rvec sum;
 +
 +    out = nbat->out;
 +    
 +    for(s=0; s<SHIFTS; s++)
 +    {
 +        clear_rvec(sum);
 +        for(th=0; th<nbat->nout; th++)
 +        {
 +            sum[XX] += out[th].fshift[s*DIM+XX];
 +            sum[YY] += out[th].fshift[s*DIM+YY];
 +            sum[ZZ] += out[th].fshift[s*DIM+ZZ];
 +        }
 +        rvec_inc(fshift[s],sum);
 +    }
 +}
index fe863769edb2fc04301c3ccb485eec5d3a148e6c,0000000000000000000000000000000000000000..bf9e92b7758722e545df17c7441bf0615d7118c6
mode 100644,000000..100644
--- /dev/null
@@@ -1,102 -1,0 +1,105 @@@
- /* With GPU kernels we group cluster pairs in 4 to optimize memory usage */
- #define NBNXN_GPU_JGROUP_SIZE  4
 +/* -*- mode: c; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4; c-file-style: "stroustrup"; -*-
 + *
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + *          GROningen MAchine for Chemical Simulations
 + *
 + * Written by David van der Spoel, Erik Lindahl, Berk Hess, and others.
 + * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 + * Copyright (c) 2001-2012, The GROMACS development team,
 + * check out http://www.gromacs.org for more information.
 + *
 + * This program is free software; you can redistribute it and/or
 + * modify it under the terms of the GNU General Public License
 + * as published by the Free Software Foundation; either version 2
 + * of the License, or (at your option) any later version.
 + *
 + * If you want to redistribute modifications, please consider that
 + * scientific software is very special. Version control is crucial -
 + * bugs must be traceable. We will be happy to consider code for
 + * inclusion in the official distribution, but derived work must not
 + * be called official GROMACS. Details are found in the README & COPYING
 + * files - if they are missing, get the official version at www.gromacs.org.
 + *
 + * To help us fund GROMACS development, we humbly ask that you cite
 + * the papers on the package - you can find them in the top README file.
 + *
 + * For more info, check our website at http://www.gromacs.org
 + *
 + * And Hey:
 + * Gallium Rubidium Oxygen Manganese Argon Carbon Silicon
 + */
 +
 +#ifndef _nbnxn_consts_h
 +#define _nbnxn_consts_h
 +
 +#ifdef __cplusplus
 +extern "C" {
 +#endif
 +
 +
 +/* The number of pair-search sub-cells per super-cell, used for GPU */
 +#define GPU_NSUBCELL_Z 2
 +#define GPU_NSUBCELL_Y 2
 +#define GPU_NSUBCELL_X 2
 +#define GPU_NSUBCELL   (GPU_NSUBCELL_Z*GPU_NSUBCELL_Y*GPU_NSUBCELL_X)
 +/* In the non-bonded GPU kernel we operate on cluster-pairs, not cells.
 + * The number of clusters in a super-cluster matches the number of sub-cells
 + * in a pair-search cell, so we introduce a new name for the same value.
 + */
 +#define NBNXN_GPU_NCLUSTER_PER_SUPERCLUSTER  GPU_NSUBCELL
 +
 +/* With CPU kernels the i-cluster size is always 4 atoms.
 + * With x86 SIMD the j-cluster size can be 2, 4 or 8, otherwise 4.
 + */
 +#define NBNXN_CPU_CLUSTER_I_SIZE       4
 +
 +#define NBNXN_CPU_CLUSTER_I_SIZE_2LOG  2
 +
 +/* With GPU kernels the cluster size is 8 atoms */
 +#define NBNXN_GPU_CLUSTER_SIZE         8
 +
++/* With GPU kernels we group cluster pairs in 4 to optimize memory usage.
++ * To change this, also change nbnxn_cj4_t in include/types/nbnxn_pairlist.h.
++ */
++#define NBNXN_GPU_JGROUP_SIZE       4
++#define NBNXN_GPU_JGROUP_SIZE_2LOG  2
 +
 +/* To avoid NaN when excluded atoms are at zero distance, we add a small
 + * number to r^2. NBNXN_AVOID_SING_R2_INC^-3 should fit in real.
 + */
 +#ifndef GMX_DOUBLE
 +#define NBNXN_AVOID_SING_R2_INC  1.0e-12f
 +#else
 +/* The double prec. x86 SIMD kernels use a single prec. invsqrt, so > 1e-38 */
 +#define NBNXN_AVOID_SING_R2_INC  1.0e-36
 +#endif
 +
 +/* Coulomb force table size chosen such that it fits along the non-bonded
 +   parameters in the texture cache. */
 +#define GPU_EWALD_COULOMB_FORCE_TABLE_SIZE 1536
 +
 +
 +/* Strides for x/f with xyz and xyzq coordinate (and charge) storage */
 +#define STRIDE_XYZ   3
 +#define STRIDE_XYZQ  4
 +/* Size of packs of x, y or z with SSE/AVX packed coords/forces */
 +#define PACK_X4      4
 +#define PACK_X8      8
 +/* Strides for a pack of 4 and 8 coordinates/forces */
 +#define STRIDE_P4    (DIM*PACK_X4)
 +#define STRIDE_P8    (DIM*PACK_X8)
 +
 +/* Index of atom a into the SSE/AVX coordinate/force array */
 +#define X4_IND_A(a)  (STRIDE_P4*((a) >> 2) + ((a) & (PACK_X4 - 1)))
 +#define X8_IND_A(a)  (STRIDE_P8*((a) >> 3) + ((a) & (PACK_X8 - 1)))
 +
 +
 +#ifdef __cplusplus
 +}
 +#endif
 +
 +#endif
index 11ab258d9eccd9dcee739bf623079dd7df5231b9,0000000000000000000000000000000000000000..22c6bb931801c58072e04aafe569ac8c8bb14b26
mode 100644,000000..100644
--- /dev/null
@@@ -1,431 -1,0 +1,431 @@@
-             for (jm = 0; jm < 4; jm++)
 +/* -*- mode: c; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4; c-file-style: "stroustrup"; -*-
 + *
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + *          GROningen MAchine for Chemical Simulations
 + *
 + * Written by David van der Spoel, Erik Lindahl, Berk Hess, and others.
 + * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 + * Copyright (c) 2001-2012, The GROMACS development team,
 + * check out http://www.gromacs.org for more information.
 + *
 + * This program is free software; you can redistribute it and/or
 + * modify it under the terms of the GNU General Public License
 + * as published by the Free Software Foundation; either version 2
 + * of the License, or (at your option) any later version.
 + *
 + * If you want to redistribute modifications, please consider that
 + * scientific software is very special. Version control is crucial -
 + * bugs must be traceable. We will be happy to consider code for
 + * inclusion in the official distribution, but derived work must not
 + * be called official GROMACS. Details are found in the README & COPYING
 + * files - if they are missing, get the official version at www.gromacs.org.
 + *
 + * To help us fund GROMACS development, we humbly ask that you cite
 + * the papers on the package - you can find them in the top README file.
 + *
 + * For more info, check our website at http://www.gromacs.org
 + *
 + * And Hey:
 + * Gallium Rubidium Oxygen Manganese Argon Carbon Silicon
 + */
 +
 +#include "maths.h"
 +/* Note that floating-point constants in CUDA code should be suffixed
 + * with f (e.g. 0.5f), to stop the compiler producing intermediate
 + * code that is in double precision.
 + */
 +
 +#if __CUDA_ARCH__ >= 300
 +#define REDUCE_SHUFFLE
 +/* On Kepler pre-loading i-atom types to shmem gives a few %,
 +   but on Fermi it does not */
 +#define IATYPE_SHMEM
 +#endif
 +
 +/*
 +   Kernel launch parameters:
 +    - #blocks   = #pair lists, blockId = pair list Id
 +    - #threads  = CL_SIZE^2
 +    - shmem     = CL_SIZE^2 * sizeof(float)
 +
 +    Each thread calculates an i force-component taking one pair of i-j atoms.
 + */
 +#ifdef PRUNE_NBL
 +#ifdef CALC_ENERGIES
 +__global__ void NB_KERNEL_FUNC_NAME(k_nbnxn, _ener_prune)
 +#else
 +__global__ void NB_KERNEL_FUNC_NAME(k_nbnxn, _prune)
 +#endif
 +#else
 +#ifdef CALC_ENERGIES
 +__global__ void NB_KERNEL_FUNC_NAME(k_nbnxn, _ener)
 +#else
 +__global__ void NB_KERNEL_FUNC_NAME(k_nbnxn)
 +#endif
 +#endif
 +            (const cu_atomdata_t atdat,
 +             const cu_nbparam_t nbparam,
 +             const cu_plist_t plist,
 +             bool bCalcFshift)
 +{
 +    /* convenience variables */
 +    const nbnxn_sci_t *pl_sci   = plist.sci;
 +#ifndef PRUNE_NBL
 +    const
 +#endif
 +    nbnxn_cj4_t *pl_cj4         = plist.cj4;
 +    const nbnxn_excl_t *excl    = plist.excl;
 +    const int *atom_types       = atdat.atom_types;
 +    int ntypes                  = atdat.ntypes;
 +    const float4 *xq            = atdat.xq;
 +    float3 *f                   = atdat.f;
 +    const float3 *shift_vec     = atdat.shift_vec;
 +    float rcoulomb_sq           = nbparam.rcoulomb_sq;
 +#ifdef VDW_CUTOFF_CHECK
 +    float rvdw_sq               = nbparam.rvdw_sq;
 +    float vdw_in_range;
 +#endif
 +#ifdef EL_RF
 +    float two_k_rf              = nbparam.two_k_rf;
 +#endif
 +#ifdef EL_EWALD
 +    float coulomb_tab_scale     = nbparam.coulomb_tab_scale;
 +#endif
 +#ifdef PRUNE_NBL
 +    float rlist_sq              = nbparam.rlist_sq;
 +#endif
 +
 +#ifdef CALC_ENERGIES
 +    float lj_shift    = nbparam.sh_invrc6;
 +#ifdef EL_EWALD
 +    float beta        = nbparam.ewald_beta;
 +    float ewald_shift = nbparam.sh_ewald;
 +#else
 +    float c_rf        = nbparam.c_rf;
 +#endif
 +    float *e_lj       = atdat.e_lj;
 +    float *e_el       = atdat.e_el;
 +#endif
 +
 +    /* thread/block/warp id-s */
 +    unsigned int tidxi  = threadIdx.x;
 +    unsigned int tidxj  = threadIdx.y;
 +    unsigned int tidx   = threadIdx.y * blockDim.x + threadIdx.x;
 +    unsigned int bidx   = blockIdx.x;
 +    unsigned int widx   = tidx / WARP_SIZE; /* warp index */
 +
 +    int sci, ci, cj, ci_offset,
 +        ai, aj,
 +        cij4_start, cij4_end,
 +        typei, typej,
 +        i, jm, j4, wexcl_idx;
 +    float qi, qj_f,
 +          r2, inv_r, inv_r2, inv_r6,
 +          c6, c12,
 +          int_bit,
 +#ifdef CALC_ENERGIES
 +          E_lj, E_el, E_lj_p,
 +#endif
 +          F_invr;
 +    unsigned int wexcl, imask, mask_ji;
 +    float4 xqbuf;
 +    float3 xi, xj, rv, f_ij, fcj_buf, fshift_buf;
 +    float3 fci_buf[NCL_PER_SUPERCL];    /* i force buffer */
 +    nbnxn_sci_t nb_sci;
 +
 +    /* shmem buffer for i x+q pre-loading */
 +    extern __shared__  float4 xqib[];
 +#ifdef IATYPE_SHMEM
 +    /* shmem buffer for i atom-type pre-loading */
 +    int *atib = (int *)(xqib + NCL_PER_SUPERCL * CL_SIZE);
 +#endif
 +
 +#ifndef REDUCE_SHUFFLE
 +    /* shmem j force buffer */
 +#ifdef IATYPE_SHMEM
 +    float *f_buf = (float *)(atib + NCL_PER_SUPERCL * CL_SIZE);
 +#else
 +    float *f_buf = (float *)(xqib + NCL_PER_SUPERCL * CL_SIZE);
 +#endif
 +#endif
 +
 +    nb_sci      = pl_sci[bidx];         /* my i super-cluster's index = current bidx */
 +    sci         = nb_sci.sci;           /* super-cluster */
 +    cij4_start  = nb_sci.cj4_ind_start; /* first ...*/
 +    cij4_end    = nb_sci.cj4_ind_end;   /* and last index of j clusters */
 +
 +    /* Store the i-atom x and q in shared memory */
 +    /* Note: the thread indexing here is inverted with respect to the
 +       inner-loop as this results in slightly higher performance */
 +    ci = sci * NCL_PER_SUPERCL + tidxi;
 +    ai = ci * CL_SIZE + tidxj;
 +    xqib[tidxi * CL_SIZE + tidxj] = xq[ai] + shift_vec[nb_sci.shift];
 +#ifdef IATYPE_SHMEM
 +    ci = sci * NCL_PER_SUPERCL + tidxj;
 +    ai = ci * CL_SIZE + tidxi;
 +    atib[tidxj * CL_SIZE + tidxi] = atom_types[ai];
 +#endif
 +    __syncthreads();
 +
 +    for(ci_offset = 0; ci_offset < NCL_PER_SUPERCL; ci_offset++)
 +    {
 +        fci_buf[ci_offset] = make_float3(0.0f);
 +    }
 +
 +#ifdef CALC_ENERGIES
 +    E_lj = 0.0f;
 +    E_el = 0.0f;
 +
 +#if defined EL_EWALD || defined EL_RF
 +    if (nb_sci.shift == CENTRAL && pl_cj4[cij4_start].cj[0] == sci*NCL_PER_SUPERCL)
 +    {
 +        /* we have the diagonal: add the charge self interaction energy term */
 +        for (i = 0; i < NCL_PER_SUPERCL; i++)
 +        {
 +            qi    = xqib[i * CL_SIZE + tidxi].w;
 +            E_el += qi*qi;
 +        }
 +        /* divide the self term equally over the j-threads */
 +        E_el /= CL_SIZE;
 +#ifdef EL_RF
 +        E_el *= -nbparam.epsfac*0.5f*c_rf;
 +#else
 +        E_el *= -nbparam.epsfac*beta*M_FLOAT_1_SQRTPI; /* last factor 1/sqrt(pi) */
 +#endif
 +    }
 +#endif
 +#endif
 +
 +    /* skip central shifts when summing shift forces */
 +    if (nb_sci.shift == CENTRAL)
 +    {
 +        bCalcFshift = false;
 +    }
 +
 +    fshift_buf = make_float3(0.0f);
 +
 +    /* loop over the j clusters = seen by any of the atoms in the current super-cluster */
 +    for (j4 = cij4_start; j4 < cij4_end; j4++)
 +    {
 +        wexcl_idx   = pl_cj4[j4].imei[widx].excl_ind;
 +        imask       = pl_cj4[j4].imei[widx].imask;
 +        wexcl       = excl[wexcl_idx].pair[(tidx) & (WARP_SIZE - 1)];
 +
 +#ifndef PRUNE_NBL
 +        if (imask)
 +#endif
 +        {
 +            /* Unrolling this loop
 +               - with pruning leads to register spilling;
 +               - on Kepler is much slower;
 +               - doesn't work on CUDA <v4.1
 +               Tested with nvcc 3.2 - 5.0.7 */
 +#if !defined PRUNE_NBL && __CUDA_ARCH__ < 300 && CUDA_VERSION >= 4010
 +#pragma unroll 4
 +#endif
-                 if (imask & (255U << (jm * NCL_PER_SUPERCL)))
++            for (jm = 0; jm < NBNXN_GPU_JGROUP_SIZE; jm++)
 +            {
++                if (imask & (supercl_interaction_mask << (jm * NCL_PER_SUPERCL)))
 +                {
 +                    mask_ji = (1U << (jm * NCL_PER_SUPERCL));
 +
 +                    cj      = pl_cj4[j4].cj[jm];
 +                    aj      = cj * CL_SIZE + tidxj;
 +
 +                    /* load j atom data */
 +                    xqbuf   = xq[aj];
 +                    xj      = make_float3(xqbuf.x, xqbuf.y, xqbuf.z);
 +                    qj_f    = nbparam.epsfac * xqbuf.w;
 +                    typej   = atom_types[aj];
 +
 +                    fcj_buf = make_float3(0.0f);
 +
 +                    /* The PME and RF kernels don't unroll with CUDA <v4.1. */
 +#if !defined PRUNE_NBL && !(CUDA_VERSION < 4010 && (defined EL_EWALD || defined EL_RF))
 +#pragma unroll 8
 +#endif
 +                    for(i = 0; i < NCL_PER_SUPERCL; i++)
 +                    {
 +                        if (imask & mask_ji)
 +                        {
 +                            ci_offset   = i;    /* i force buffer offset */
 +
 +                            ci      = sci * NCL_PER_SUPERCL + i; /* i cluster index */
 +                            ai      = ci * CL_SIZE + tidxi;      /* i atom index */
 +
 +                            /* all threads load an atom from i cluster ci into shmem! */
 +                            xqbuf   = xqib[i * CL_SIZE + tidxi];
 +                            xi      = make_float3(xqbuf.x, xqbuf.y, xqbuf.z);
 +
 +                            /* distance between i and j atoms */
 +                            rv      = xi - xj;
 +                            r2      = norm2(rv);
 +
 +#ifdef PRUNE_NBL
 +                            /* If _none_ of the atoms pairs are in cutoff range,
 +                               the bit corresponding to the current
 +                               cluster-pair in imask gets set to 0. */
 +                            if (!__any(r2 < rlist_sq))
 +                            {
 +                                imask &= ~mask_ji;
 +                            }
 +#endif
 +
 +                            int_bit = (wexcl & mask_ji) ? 1.0f : 0.0f;
 +
 +                            /* cutoff & exclusion check */
 +#if defined EL_EWALD || defined EL_RF
 +                            if (r2 < rcoulomb_sq *
 +                                (nb_sci.shift != CENTRAL || ci != cj || tidxj > tidxi))
 +#else
 +                            if (r2 < rcoulomb_sq * int_bit)
 +#endif
 +                            {
 +                                /* load the rest of the i-atom parameters */
 +                                qi      = xqbuf.w;
 +#ifdef IATYPE_SHMEM
 +                                typei   = atib[i * CL_SIZE + tidxi];
 +#else
 +                                typei   = atom_types[ai];
 +#endif
 +
 +                                /* LJ 6*C6 and 12*C12 */
 +                                c6      = tex1Dfetch(tex_nbfp, 2 * (ntypes * typei + typej));
 +                                c12     = tex1Dfetch(tex_nbfp, 2 * (ntypes * typei + typej) + 1);
 +
 +                                /* avoid NaN for excluded pairs at r=0 */
 +                                r2      += (1.0f - int_bit) * NBNXN_AVOID_SING_R2_INC;
 +
 +                                inv_r   = rsqrt(r2);
 +                                inv_r2  = inv_r * inv_r;
 +                                inv_r6  = inv_r2 * inv_r2 * inv_r2;
 +#if defined EL_EWALD || defined EL_RF
 +                                /* We could mask inv_r2, but with Ewald
 +                                 * masking both inv_r6 and F_invr is faster */
 +                                inv_r6  *= int_bit;
 +#endif
 +
 +                                F_invr  = inv_r6 * (c12 * inv_r6 - c6) * inv_r2;
 +
 +#ifdef CALC_ENERGIES
 +                                E_lj_p  = int_bit * (c12 * (inv_r6 * inv_r6 - lj_shift * lj_shift) * 0.08333333f - c6 * (inv_r6 - lj_shift) * 0.16666667f);
 +#endif
 +
 +#ifdef VDW_CUTOFF_CHECK
 +                                /* this enables twin-range cut-offs (rvdw < rcoulomb <= rlist) */
 +                                vdw_in_range = (r2 < rvdw_sq) ? 1.0f : 0.0f;
 +                                F_invr  *= vdw_in_range;
 +#ifdef CALC_ENERGIES
 +                                E_lj_p  *= vdw_in_range;
 +#endif
 +#endif
 +#ifdef CALC_ENERGIES
 +                                E_lj    += E_lj_p;
 +#endif
 +
 +
 +#ifdef EL_CUTOFF
 +                                F_invr  += qi * qj_f * inv_r2 * inv_r;
 +#endif
 +#ifdef EL_RF
 +                                F_invr  += qi * qj_f * (int_bit*inv_r2 * inv_r - two_k_rf);
 +#endif
 +#ifdef EL_EWALD
 +                                F_invr  += qi * qj_f * (int_bit*inv_r2 - interpolate_coulomb_force_r(r2 * inv_r, coulomb_tab_scale)) * inv_r;
 +#endif
 +
 +#ifdef CALC_ENERGIES
 +#ifdef EL_CUTOFF
 +                                E_el    += qi * qj_f * (inv_r - c_rf);
 +#endif
 +#ifdef EL_RF
 +                                E_el    += qi * qj_f * (int_bit*inv_r + 0.5f * two_k_rf * r2 - c_rf);
 +#endif
 +#ifdef EL_EWALD
 +                                /* 1.0f - erff is faster than erfcf */
 +                                E_el    += qi * qj_f * (inv_r * (int_bit - erff(r2 * inv_r * beta)) - int_bit * ewald_shift);
 +#endif
 +#endif
 +                                f_ij    = rv * F_invr;
 +
 +                                /* accumulate j forces in registers */
 +                                fcj_buf -= f_ij;
 +
 +                                /* accumulate i forces in registers */
 +                                fci_buf[ci_offset] += f_ij;
 +                            }
 +                        }
 +
 +                        /* shift the mask bit by 1 */
 +                        mask_ji += mask_ji;
 +                    }
 +
 +                    /* reduce j forces */
 +#ifdef REDUCE_SHUFFLE
 +                    reduce_force_j_warp_shfl(fcj_buf, f, tidxi, aj);
 +#else
 +                    /* store j forces in shmem */
 +                    f_buf[                  tidx] = fcj_buf.x;
 +                    f_buf[    FBUF_STRIDE + tidx] = fcj_buf.y;
 +                    f_buf[2 * FBUF_STRIDE + tidx] = fcj_buf.z;
 +
 +                    reduce_force_j_generic(f_buf, f, tidxi, tidxj, aj);
 +#endif
 +                }
 +            }
 +#ifdef PRUNE_NBL
 +            /* Update the imask with the new one which does not contain the
 +               out of range clusters anymore. */
 +            pl_cj4[j4].imei[widx].imask = imask;
 +#endif
 +        }
 +    }
 +
 +    /* reduce i forces */
 +    for(ci_offset = 0; ci_offset < NCL_PER_SUPERCL; ci_offset++)
 +    {
 +        ai  = (sci * NCL_PER_SUPERCL + ci_offset) * CL_SIZE + tidxi;
 +#ifdef REDUCE_SHUFFLE
 +        reduce_force_i_warp_shfl(fci_buf[ci_offset], f,
 +                                 &fshift_buf, bCalcFshift,
 +                                 tidxj, ai);
 +#else
 +        f_buf[                  tidx] = fci_buf[ci_offset].x;
 +        f_buf[    FBUF_STRIDE + tidx] = fci_buf[ci_offset].y;
 +        f_buf[2 * FBUF_STRIDE + tidx] = fci_buf[ci_offset].z;
 +        __syncthreads();
 +        reduce_force_i(f_buf, f,
 +                       &fshift_buf, bCalcFshift,
 +                       tidxi, tidxj, ai);
 +        __syncthreads();
 +#endif
 +    }
 +
 +    /* add up local shift forces into global mem */
 +#ifdef REDUCE_SHUFFLE
 +    if (bCalcFshift && (tidxj == 0 || tidxj == 4))
 +#else
 +    if (bCalcFshift && tidxj == 0)
 +#endif
 +    {
 +        atomicAdd(&atdat.fshift[nb_sci.shift].x, fshift_buf.x);
 +        atomicAdd(&atdat.fshift[nb_sci.shift].y, fshift_buf.y);
 +        atomicAdd(&atdat.fshift[nb_sci.shift].z, fshift_buf.z);
 +    }
 +
 +#ifdef CALC_ENERGIES
 +#ifdef REDUCE_SHUFFLE
 +    /* reduce the energies over warps and store into global memory */
 +    reduce_energy_warp_shfl(E_lj, E_el, e_lj, e_el, tidx);
 +#else
 +    /* flush the energies to shmem and reduce them */
 +    f_buf[              tidx] = E_lj;
 +    f_buf[FBUF_STRIDE + tidx] = E_el;
 +    reduce_energy_pow2(f_buf + (tidx & WARP_SIZE), e_lj, e_el, tidx & ~WARP_SIZE);
 +#endif
 +#endif
 +}
index 6891d13bb769999c31c23e295de6f74318f593d4,0000000000000000000000000000000000000000..be1c7a778c95b6258fcb0ba80111d9b06822841c
mode 100644,000000..100644
--- /dev/null
@@@ -1,385 -1,0 +1,385 @@@
-             for (jm = 0; jm < 4; jm++)
 +/* -*- mode: c; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4; c-file-style: "stroustrup"; -*-
 + *
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + *          GROningen MAchine for Chemical Simulations
 + *
 + * Written by David van der Spoel, Erik Lindahl, Berk Hess, and others.
 + * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 + * Copyright (c) 2001-2012, The GROMACS development team,
 + * check out http://www.gromacs.org for more information.
 + *
 + * This program is free software; you can redistribute it and/or
 + * modify it under the terms of the GNU General Public License
 + * as published by the Free Software Foundation; either version 2
 + * of the License, or (at your option) any later version.
 + *
 + * If you want to redistribute modifications, please consider that
 + * scientific software is very special. Version control is crucial -
 + * bugs must be traceable. We will be happy to consider code for
 + * inclusion in the official distribution, but derived work must not
 + * be called official GROMACS. Details are found in the README & COPYING
 + * files - if they are missing, get the official version at www.gromacs.org.
 + *
 + * To help us fund GROMACS development, we humbly ask that you cite
 + * the papers on the package - you can find them in the top README file.
 + *
 + * For more info, check our website at http://www.gromacs.org
 + *
 + * And Hey:
 + * Gallium Rubidium Oxygen Manganese Argon Carbon Silicon
 + */
 +
 +#include "maths.h"
 +/* Note that floating-point constants in CUDA code should be suffixed
 + * with f (e.g. 0.5f), to stop the compiler producing intermediate
 + * code that is in double precision.
 + */
 +
 +/*
 +   Kernel launch parameters:
 +    - #blocks   = #pair lists, blockId = pair list Id
 +    - #threads  = CL_SIZE^2
 +    - shmem     = CL_SIZE^2 * sizeof(float)
 +
 +    Each thread calculates an i force-component taking one pair of i-j atoms.
 + */
 +#ifdef PRUNE_NBL
 +#ifdef CALC_ENERGIES
 +__global__ void NB_KERNEL_FUNC_NAME(k_nbnxn, _ener_prune_legacy)
 +#else
 +__global__ void NB_KERNEL_FUNC_NAME(k_nbnxn, _prune_legacy)
 +#endif
 +#else
 +#ifdef CALC_ENERGIES
 +__global__ void NB_KERNEL_FUNC_NAME(k_nbnxn, _ener_legacy)
 +#else
 +__global__ void NB_KERNEL_FUNC_NAME(k_nbnxn, _legacy)
 +#endif
 +#endif
 +            (const cu_atomdata_t atdat,
 +             const cu_nbparam_t nbparam,
 +             const cu_plist_t plist,
 +             bool bCalcFshift)
 +{
 +    /* convenience variables */
 +    const nbnxn_sci_t *pl_sci   = plist.sci;
 +#ifndef PRUNE_NBL
 +    const
 +#endif
 +    nbnxn_cj4_t *pl_cj4         = plist.cj4;
 +    const nbnxn_excl_t *excl    = plist.excl;
 +    const int *atom_types       = atdat.atom_types;
 +    int ntypes                  = atdat.ntypes;
 +    const float4 *xq            = atdat.xq;
 +    float3 *f                   = atdat.f;
 +    const float3 *shift_vec     = atdat.shift_vec;
 +    float rcoulomb_sq           = nbparam.rcoulomb_sq;
 +#ifdef VDW_CUTOFF_CHECK
 +    float rvdw_sq               = nbparam.rvdw_sq;
 +    float vdw_in_range;
 +#endif
 +#ifdef EL_RF
 +    float two_k_rf              = nbparam.two_k_rf;
 +#endif
 +#ifdef EL_EWALD
 +    float coulomb_tab_scale     = nbparam.coulomb_tab_scale;
 +#endif
 +#ifdef PRUNE_NBL
 +    float rlist_sq              = nbparam.rlist_sq;
 +#endif
 +
 +#ifdef CALC_ENERGIES
 +    float lj_shift    = nbparam.sh_invrc6;
 +#ifdef EL_EWALD
 +    float beta        = nbparam.ewald_beta;
 +    float ewald_shift = nbparam.sh_ewald;
 +#else
 +    float c_rf        = nbparam.c_rf;
 +#endif
 +    float *e_lj       = atdat.e_lj;
 +    float *e_el       = atdat.e_el;
 +#endif
 +
 +    /* thread/block/warp id-s */
 +    unsigned int tidxi  = threadIdx.x;
 +    unsigned int tidxj  = threadIdx.y;
 +    unsigned int tidx   = threadIdx.y * blockDim.x + threadIdx.x;
 +    unsigned int bidx   = blockIdx.x;
 +    unsigned int widx   = tidx / WARP_SIZE; /* warp index */
 +
 +    int sci, ci, cj, ci_offset,
 +        ai, aj,
 +        cij4_start, cij4_end,
 +        typei, typej,
 +        i, cii, jm, j4, nsubi, wexcl_idx;
 +    float qi, qj_f,
 +          r2, inv_r, inv_r2, inv_r6,
 +          c6, c12,
 +#ifdef CALC_ENERGIES
 +          E_lj, E_el, E_lj_p,
 +#endif
 +          F_invr;
 +    unsigned int wexcl, int_bit, imask, imask_j;
 +#ifdef PRUNE_NBL
 +    unsigned int imask_prune;
 +#endif
 +    float4 xqbuf;
 +    float3 xi, xj, rv, f_ij, fcj_buf, fshift_buf;
 +    float3 fci_buf[NCL_PER_SUPERCL];    /* i force buffer */
 +    nbnxn_sci_t nb_sci;
 +
 +    /* shmem buffer for i x+q pre-loading */
 +    extern __shared__  float4 xqib[];
 +    /* shmem j force buffer */
 +    float *f_buf = (float *)(xqib + NCL_PER_SUPERCL * CL_SIZE);
 +
 +    nb_sci      = pl_sci[bidx];         /* my i super-cluster's index = current bidx */
 +    sci         = nb_sci.sci;           /* super-cluster */
 +    cij4_start  = nb_sci.cj4_ind_start; /* first ...*/
 +    cij4_end    = nb_sci.cj4_ind_end;   /* and last index of j clusters */
 +
 +    /* Store the i-atom x and q in shared memory */
 +    /* Note: the thread indexing here is inverted with respect to the
 +       inner-loop as this results in slightly higher performance */
 +    ci = sci * NCL_PER_SUPERCL + tidxi;
 +    ai = ci * CL_SIZE + tidxj;
 +    xqib[tidxi * CL_SIZE + tidxj] = xq[ai] + shift_vec[nb_sci.shift];
 +    __syncthreads();
 +
 +    for(ci_offset = 0; ci_offset < NCL_PER_SUPERCL; ci_offset++)
 +    {
 +        fci_buf[ci_offset] = make_float3(0.0f);
 +    }
 +
 +#ifdef CALC_ENERGIES
 +    E_lj = 0.0f;
 +    E_el = 0.0f;
 +
 +#if defined EL_EWALD || defined EL_RF
 +    if (nb_sci.shift == CENTRAL && pl_cj4[cij4_start].cj[0] == sci*NCL_PER_SUPERCL)
 +    {
 +        /* we have the diagonal: add the charge self interaction energy term */
 +        for (i = 0; i < NCL_PER_SUPERCL; i++)
 +        {
 +            qi    = xqib[i * CL_SIZE + tidxi].w;
 +            E_el += qi*qi;
 +        }
 +        /* divide the self term equally over the j-threads */
 +        E_el /= CL_SIZE;
 +#ifdef EL_RF
 +        E_el *= -nbparam.epsfac*0.5f*c_rf;
 +#else
 +        E_el *= -nbparam.epsfac*beta*M_FLOAT_1_SQRTPI; /* last factor 1/sqrt(pi) */
 +#endif
 +    }
 +#endif
 +#endif
 +
 +    /* skip central shifts when summing shift forces */
 +    if (nb_sci.shift == CENTRAL)
 +    {
 +        bCalcFshift = false;
 +    }
 +
 +    fshift_buf = make_float3(0.0f);
 +
 +    /* loop over the j clusters = seen by any of the atoms in the current super-cluster */
 +    for (j4 = cij4_start; j4 < cij4_end; j4++)
 +    {
 +        wexcl_idx   = pl_cj4[j4].imei[widx].excl_ind;
 +        imask       = pl_cj4[j4].imei[widx].imask;
 +        wexcl       = excl[wexcl_idx].pair[(tidx) & (WARP_SIZE - 1)];
 +
 +#ifndef PRUNE_NBL
 +        if (imask)
 +#endif
 +        {
 +#ifdef PRUNE_NBL
 +            imask_prune = imask;
 +#endif
 +
 +            /* nvcc >v4.1 doesn't like this loop, it refuses to unroll it */
 +#if CUDA_VERSION >= 4010
 +            #pragma unroll 4
 +#endif
-                 imask_j = (imask >> (jm * 8)) & 255U;
++            for (jm = 0; jm < NBNXN_GPU_JGROUP_SIZE; jm++)
 +            {
++                imask_j = (imask >> (jm * CL_SIZE)) & supercl_interaction_mask;
 +                if (imask_j)
 +                {
 +                    nsubi = __popc(imask_j);
 +
 +                    cj      = pl_cj4[j4].cj[jm];
 +                    aj      = cj * CL_SIZE + tidxj;
 +
 +                    /* load j atom data */
 +                    xqbuf   = xq[aj];
 +                    xj      = make_float3(xqbuf.x, xqbuf.y, xqbuf.z);
 +                    qj_f    = nbparam.epsfac * xqbuf.w;
 +                    typej   = atom_types[aj];
 +
 +                    fcj_buf = make_float3(0.0f);
 +
 +                    /* loop over the i-clusters in sci */
 +                    /* #pragma unroll 8
 +                       -- nvcc doesn't like my code, it refuses to unroll it
 +                       which is a pity because here unrolling could help.  */
 +                    for (cii = 0; cii < nsubi; cii++)
 +                    {
 +                        i = __ffs(imask_j) - 1;
 +                        imask_j &= ~(1U << i);
 +
 +                        ci_offset   = i;    /* i force buffer offset */
 +
 +                        ci      = sci * NCL_PER_SUPERCL + i; /* i cluster index */
 +                        ai      = ci * CL_SIZE + tidxi;      /* i atom index */
 +
 +                        /* all threads load an atom from i cluster ci into shmem! */
 +                        xqbuf   = xqib[i * CL_SIZE + tidxi];
 +                        xi      = make_float3(xqbuf.x, xqbuf.y, xqbuf.z);
 +
 +                        /* distance between i and j atoms */
 +                        rv      = xi - xj;
 +                        r2      = norm2(rv);
 +
 +#ifdef PRUNE_NBL
 +                        /* If _none_ of the atoms pairs are in cutoff range,
 +                               the bit corresponding to the current
 +                               cluster-pair in imask gets set to 0. */
 +                        if (!__any(r2 < rlist_sq))
 +                        {
 +                            imask_prune &= ~(1U << (jm * NCL_PER_SUPERCL + i));
 +                        }
 +#endif
 +
 +                        int_bit = ((wexcl >> (jm * NCL_PER_SUPERCL + i)) & 1);
 +
 +                        /* cutoff & exclusion check */
 +#if defined EL_EWALD || defined EL_RF
 +                        if (r2 < rcoulomb_sq *
 +                            (nb_sci.shift != CENTRAL || ci != cj || tidxj > tidxi))
 +#else
 +                        if (r2 < rcoulomb_sq * int_bit)
 +#endif
 +                        {
 +                            /* load the rest of the i-atom parameters */
 +                            qi      = xqbuf.w;
 +                            typei   = atom_types[ai];
 +
 +                            /* LJ 6*C6 and 12*C12 */
 +                            c6      = tex1Dfetch(tex_nbfp, 2 * (ntypes * typei + typej));
 +                            c12     = tex1Dfetch(tex_nbfp, 2 * (ntypes * typei + typej) + 1);
 +
 +                            /* avoid NaN for excluded pairs at r=0 */
 +                            r2      += (1.0f - int_bit) * NBNXN_AVOID_SING_R2_INC;
 +
 +                            inv_r   = rsqrt(r2);
 +                            inv_r2  = inv_r * inv_r;
 +                            inv_r6  = inv_r2 * inv_r2 * inv_r2;
 +#if defined EL_EWALD || defined EL_RF
 +                            /* We could mask inv_r2, but with Ewald
 +                             * masking both inv_r6 and F_invr is faster */
 +                            inv_r6  *= int_bit;
 +#endif
 +
 +                            F_invr  = inv_r6 * (c12 * inv_r6 - c6) * inv_r2;
 +
 +#ifdef CALC_ENERGIES
 +                                E_lj_p  = int_bit * (c12 * (inv_r6 * inv_r6 - lj_shift * lj_shift) * 0.08333333f - c6 * (inv_r6 - lj_shift) * 0.16666667f);
 +#endif
 +
 +#ifdef VDW_CUTOFF_CHECK
 +                                /* this enables twin-range cut-offs (rvdw < rcoulomb <= rlist) */
 +                                vdw_in_range = (r2 < rvdw_sq) ? 1.0f : 0.0f;
 +                                F_invr  *= vdw_in_range;
 +#ifdef CALC_ENERGIES
 +                                E_lj_p  *= vdw_in_range;
 +#endif
 +#endif
 +#ifdef CALC_ENERGIES
 +                                E_lj    += E_lj_p;
 +#endif
 +
 +
 +#ifdef EL_CUTOFF
 +                            F_invr  += qi * qj_f * inv_r2 * inv_r;
 +#endif
 +#ifdef EL_RF
 +                            F_invr  += qi * qj_f * (int_bit*inv_r2 * inv_r - two_k_rf);
 +#endif
 +#ifdef EL_EWALD
 +                            F_invr  += qi * qj_f * (int_bit*inv_r2 - interpolate_coulomb_force_r(r2 * inv_r, coulomb_tab_scale)) * inv_r;
 +#endif
 +
 +#ifdef CALC_ENERGIES
 +#ifdef EL_CUTOFF
 +                            E_el    += qi * qj_f * (inv_r - c_rf);
 +#endif
 +#ifdef EL_RF
 +                            E_el    += qi * qj_f * (int_bit*inv_r + 0.5f * two_k_rf * r2 - c_rf);
 +#endif
 +#ifdef EL_EWALD
 +                            /* 1.0f - erff is faster than erfcf */
 +                            E_el    += qi * qj_f * (inv_r * (int_bit - erff(r2 * inv_r * beta)) - int_bit * ewald_shift);
 +#endif
 +#endif
 +                            f_ij    = rv * F_invr;
 +
 +                            /* accumulate j forces in registers */
 +                            fcj_buf -= f_ij;
 +
 +                            /* accumulate i forces in registers */
 +                            fci_buf[ci_offset] += f_ij;
 +                        }
 +                    }
 +
 +                    /* store j forces in shmem */
 +                    f_buf[                  tidx] = fcj_buf.x;
 +                    f_buf[    FBUF_STRIDE + tidx] = fcj_buf.y;
 +                    f_buf[2 * FBUF_STRIDE + tidx] = fcj_buf.z;
 +
 +                    /* reduce j forces */
 +                    reduce_force_j_generic(f_buf, f, tidxi, tidxj, aj);
 +                }
 +            }
 +#ifdef PRUNE_NBL
 +            /* Update the imask with the new one which does not contain the
 +               out of range clusters anymore. */
 +            pl_cj4[j4].imei[widx].imask = imask_prune;
 +#endif
 +        }
 +    }
 +
 +    /* reduce i forces */
 +    for(ci_offset = 0; ci_offset < NCL_PER_SUPERCL; ci_offset++)
 +    {
 +        ai  = (sci * NCL_PER_SUPERCL + ci_offset) * CL_SIZE + tidxi;
 +        f_buf[                  tidx] = fci_buf[ci_offset].x;
 +        f_buf[    FBUF_STRIDE + tidx] = fci_buf[ci_offset].y;
 +        f_buf[2 * FBUF_STRIDE + tidx] = fci_buf[ci_offset].z;
 +        __syncthreads();
 +        reduce_force_i(f_buf, f,
 +                       &fshift_buf, bCalcFshift,
 +                       tidxi, tidxj, ai);
 +        __syncthreads();
 +    }
 +
 +    /* add up local shift forces into global mem */
 +    if (bCalcFshift && tidxj == 0)
 +    {
 +        atomicAdd(&atdat.fshift[nb_sci.shift].x, fshift_buf.x);
 +        atomicAdd(&atdat.fshift[nb_sci.shift].y, fshift_buf.y);
 +        atomicAdd(&atdat.fshift[nb_sci.shift].z, fshift_buf.z);
 +    }
 +
 +#ifdef CALC_ENERGIES
 +    /* flush the energies to shmem and reduce them */
 +    f_buf[              tidx] = E_lj;
 +    f_buf[FBUF_STRIDE + tidx] = E_el;
 +    reduce_energy_pow2(f_buf + (tidx & WARP_SIZE), e_lj, e_el, tidx & ~WARP_SIZE);
 +#endif
 +}
index 610be4571d4a6504d9991f9761cdadd8c783f33c,0000000000000000000000000000000000000000..d057fc550a158c4fec3640a668bf026df5c78798
mode 100644,000000..100644
--- /dev/null
@@@ -1,302 -1,0 +1,305 @@@
 +/* -*- mode: c; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4; c-file-style: "stroustrup"; -*-
 + *
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + *          GROningen MAchine for Chemical Simulations
 + *
 + * Written by David van der Spoel, Erik Lindahl, Berk Hess, and others.
 + * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 + * Copyright (c) 2001-2012, The GROMACS development team,
 + * check out http://www.gromacs.org for more information.
 + *
 + * This program is free software; you can redistribute it and/or
 + * modify it under the terms of the GNU General Public License
 + * as published by the Free Software Foundation; either version 2
 + * of the License, or (at your option) any later version.
 + *
 + * If you want to redistribute modifications, please consider that
 + * scientific software is very special. Version control is crucial -
 + * bugs must be traceable. We will be happy to consider code for
 + * inclusion in the official distribution, but derived work must not
 + * be called official GROMACS. Details are found in the README & COPYING
 + * files - if they are missing, get the official version at www.gromacs.org.
 + *
 + * To help us fund GROMACS development, we humbly ask that you cite
 + * the papers on the package - you can find them in the top README file.
 + *
 + * For more info, check our website at http://www.gromacs.org
 + *
 + * And Hey:
 + * Gallium Rubidium Oxygen Manganese Argon Carbon Silicon
 + */
 +
 +/* Note that floating-point constants in CUDA code should be suffixed
 + * with f (e.g. 0.5f), to stop the compiler producing intermediate
 + * code that is in double precision.
 + */
 +
 +#include "../../gmxlib/cuda_tools/vectype_ops.cuh"
 +
 +#ifndef NBNXN_CUDA_KERNEL_UTILS_CUH
 +#define NBNXN_CUDA_KERNEL_UTILS_CUH
 +
 +#define WARP_SIZE_POW2_EXPONENT     (5)
 +#define CL_SIZE_POW2_EXPONENT       (3)  /* change this together with GPU_NS_CLUSTER_SIZE !*/
 +#define CL_SIZE_SQ                  (CL_SIZE * CL_SIZE)
 +#define FBUF_STRIDE                 (CL_SIZE_SQ)
 +
++/*! i-cluster interaction mask for a super-cluster with all NCL_PER_SUPERCL bits set */
++const unsigned supercl_interaction_mask = ((1U << NCL_PER_SUPERCL) - 1U);
++
 +/*! Interpolate Ewald coulomb force using the table through the tex_nbfp texture.
 + *  Original idea: OpenMM
 + */
 +static inline __device__
 +float interpolate_coulomb_force_r(float r, float scale)
 +{
 +    float   normalized = scale * r;
 +    int     index = (int) normalized;
 +    float   fract2 = normalized - index;
 +    float   fract1 = 1.0f - fract2;
 +
 +    return  fract1 * tex1Dfetch(tex_coulomb_tab, index)
 +            + fract2 * tex1Dfetch(tex_coulomb_tab, index + 1);
 +}
 +
 +/*! Final j-force reduction; this generic implementation works with
 + *  arbitrary array sizes.
 + */
 +static inline __device__
 +void reduce_force_j_generic(float *f_buf, float3 *fout,
 +                            int tidxi, int tidxj, int aidx)
 +{
 +    if (tidxi == 0)
 +    {
 +        float3 f = make_float3(0.0f);
 +        for (int j = tidxj * CL_SIZE; j < (tidxj + 1) * CL_SIZE; j++)
 +        {
 +            f.x += f_buf[                  j];
 +            f.y += f_buf[    FBUF_STRIDE + j];
 +            f.z += f_buf[2 * FBUF_STRIDE + j];
 +        }
 +
 +        atomicAdd(&fout[aidx], f);
 +    }
 +}
 +
 +/*! Final j-force reduction; this implementation only with power of two
 + *  array sizes and with sm >= 3.0
 + */
 +#if __CUDA_ARCH__ >= 300
 +static inline __device__
 +void reduce_force_j_warp_shfl(float3 f, float3 *fout,
 +                              int tidxi, int aidx)
 +{
 +    int i;
 +
 +#pragma unroll 3
 +    for (i = 0; i < 3; i++)
 +    {
 +        f.x += __shfl_down(f.x, 1<<i);
 +        f.y += __shfl_down(f.y, 1<<i);
 +        f.z += __shfl_down(f.z, 1<<i);
 +    }
 +
 +    /* Write the reduced j-force on one thread for each j */
 +    if (tidxi == 0)
 +    {
 +        atomicAdd(&fout[aidx], f);
 +    }
 +}
 +#endif
 +
 +/*! Final i-force reduction; this generic implementation works with
 + *  arbitrary array sizes.
 + */
 +static inline __device__
 +void reduce_force_i_generic(float *f_buf, float3 *fout,
 +                            float3 *fshift_buf, bool bCalcFshift,
 +                            int tidxi, int tidxj, int aidx)
 +{
 +    if (tidxj == 0)
 +    {
 +        float3 f = make_float3(0.0f);
 +        for (int j = tidxi; j < CL_SIZE_SQ; j += CL_SIZE)
 +        {
 +            f.x += f_buf[                  j];
 +            f.y += f_buf[    FBUF_STRIDE + j];
 +            f.z += f_buf[2 * FBUF_STRIDE + j];
 +        }
 +
 +        atomicAdd(&fout[aidx], f);
 +
 +        if (bCalcFshift)
 +        {
 +            *fshift_buf += f;
 +        }
 +    }
 +}
 +
 +/*! Final i-force reduction; this implementation works only with power of two
 + *  array sizes.
 + */
 +static inline __device__
 +void reduce_force_i_pow2(volatile float *f_buf, float3 *fout,
 +                         float3 *fshift_buf, bool bCalcFshift,
 +                         int tidxi, int tidxj, int aidx)
 +{
 +    int     i, j;
 +    float3  f = make_float3(0.0f);
 +
 +    /* Reduce the initial CL_SIZE values for each i atom to half
 +     * every step by using CL_SIZE * i threads.
 +     * Can't just use i as loop variable because than nvcc refuses to unroll.
 +     */
 +    i = CL_SIZE/2;
 +    # pragma unroll 5
 +    for (j = CL_SIZE_POW2_EXPONENT - 1; j > 0; j--)
 +    {
 +        if (tidxj < i)
 +        {
 +
 +            f_buf[                  tidxj * CL_SIZE + tidxi] += f_buf[                  (tidxj + i) * CL_SIZE + tidxi];
 +            f_buf[    FBUF_STRIDE + tidxj * CL_SIZE + tidxi] += f_buf[    FBUF_STRIDE + (tidxj + i) * CL_SIZE + tidxi];
 +            f_buf[2 * FBUF_STRIDE + tidxj * CL_SIZE + tidxi] += f_buf[2 * FBUF_STRIDE + (tidxj + i) * CL_SIZE + tidxi];
 +        }
 +        i >>= 1;
 +    }
 +
 +    /* i == 1, last reduction step, writing to global mem */
 +    if (tidxj == 0)
 +    {
 +        f.x = f_buf[                  tidxj * CL_SIZE + tidxi] + f_buf[                  (tidxj + i) * CL_SIZE + tidxi];
 +        f.y = f_buf[    FBUF_STRIDE + tidxj * CL_SIZE + tidxi] + f_buf[    FBUF_STRIDE + (tidxj + i) * CL_SIZE + tidxi];
 +        f.z = f_buf[2 * FBUF_STRIDE + tidxj * CL_SIZE + tidxi] + f_buf[2 * FBUF_STRIDE + (tidxj + i) * CL_SIZE + tidxi];
 +
 +        atomicAdd(&fout[aidx], f);
 +
 +        if (bCalcFshift)
 +        {
 +            *fshift_buf += f;
 +        }
 +    }
 +}
 +
 +/*! Final i-force reduction wrapper; calls the generic or pow2 reduction depending
 + *  on whether the size of the array to be reduced is power of two or not.
 + */
 +static inline __device__
 +void reduce_force_i(float *f_buf, float3 *f,
 +                    float3 *fshift_buf, bool bCalcFshift,
 +                    int tidxi, int tidxj, int ai)
 +{
 +    if ((CL_SIZE & (CL_SIZE - 1)))
 +    {
 +        reduce_force_i_generic(f_buf, f, fshift_buf, bCalcFshift, tidxi, tidxj, ai);
 +    }
 +    else
 +    {
 +        reduce_force_i_pow2(f_buf, f, fshift_buf, bCalcFshift, tidxi, tidxj, ai);
 +    }
 +}
 +
 +/*! Final i-force reduction; this implementation works only with power of two
 + *  array sizes and with sm >= 3.0
 + */
 +#if __CUDA_ARCH__ >= 300
 +static inline __device__
 +void reduce_force_i_warp_shfl(float3 fin, float3 *fout,
 +                              float3 *fshift_buf, bool bCalcFshift,
 +                              int tidxj, int aidx)
 +{
 +    int j;
 +
 +#pragma unroll 2
 +    for (j = 0; j < 2; j++)
 +    {
 +        fin.x += __shfl_down(fin.x,  CL_SIZE<<j);
 +        fin.y += __shfl_down(fin.y,  CL_SIZE<<j);
 +        fin.z += __shfl_down(fin.z,  CL_SIZE<<j);
 +    }
 +
 +    /* The first thread in the warp writes the reduced force */
 +    if (tidxj == 0 || tidxj == 4)
 +    {
 +        atomicAdd(&fout[aidx], fin);
 +
 +        if (bCalcFshift)
 +        {
 +            fshift_buf->x += fin.x;
 +            fshift_buf->y += fin.y;
 +            fshift_buf->z += fin.z;
 +        }
 +    }
 +}
 +#endif
 +
 +/*! Energy reduction; this implementation works only with power of two
 + *  array sizes.
 + */
 +static inline __device__
 +void reduce_energy_pow2(volatile float *buf,
 +                        float *e_lj, float *e_el,
 +                        unsigned int tidx)
 +{
 +    int     i, j;
 +    float   e1, e2;
 +
 +    i = WARP_SIZE/2;
 +
 +    /* Can't just use i as loop variable because than nvcc refuses to unroll. */
 +# pragma unroll 10
 +    for (j = WARP_SIZE_POW2_EXPONENT - 1; j > 0; j--)
 +    {
 +        if (tidx < i)
 +        {
 +            buf[              tidx] += buf[              tidx + i];
 +            buf[FBUF_STRIDE + tidx] += buf[FBUF_STRIDE + tidx + i];
 +        }
 +        i >>= 1;
 +    }
 +
 +    /* last reduction step, writing to global mem */
 +    if (tidx == 0)
 +    {
 +        e1 = buf[              tidx] + buf[              tidx + i];
 +        e2 = buf[FBUF_STRIDE + tidx] + buf[FBUF_STRIDE + tidx + i];
 +
 +        atomicAdd(e_lj, e1);
 +        atomicAdd(e_el, e2);
 +    }
 +}
 +
 +/*! Energy reduction; this implementation works only with power of two
 + *  array sizes and with sm >= 3.0
 + */
 +#if __CUDA_ARCH__ >= 300
 +static inline __device__
 +void reduce_energy_warp_shfl(float E_lj, float E_el,
 +                             float *e_lj, float *e_el,
 +                             int tidx)
 +{
 +    int i, sh;
 +
 +    sh = 1;
 +#pragma unroll 5
 +    for (i = 0; i < 5; i++)
 +    {
 +        E_lj += __shfl_down(E_lj,sh);
 +        E_el += __shfl_down(E_el,sh);
 +        sh += sh;
 +    }
 +
 +    /* The first thread in the warp writes the reduced energies */
 +    if (tidx == 0 || tidx == WARP_SIZE)
 +    {
 +        atomicAdd(e_lj,E_lj);
 +        atomicAdd(e_el,E_el);
 +    }
 +}
 +#endif /* __CUDA_ARCH__ */
 +
 +#endif /* NBNXN_CUDA_KERNEL_UTILS_CUH */
index a9b8e2d4419283b62dc1de822ab2e6428e0f1e75,0000000000000000000000000000000000000000..c54e068a6da224593416774f60ebd525e4fb2094
mode 100644,000000..100644
--- /dev/null
@@@ -1,244 -1,0 +1,254 @@@
- #define NBNXN_SEARCH_SSE
 +/* -*- mode: c; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4; c-file-style: "stroustr
 + *
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + *          GROningen MAchine for Chemical Simulations
 + *
 + * Written by David van der Spoel, Erik Lindahl, Berk Hess, and others.
 + * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 + * Copyright (c) 2001-2012, The GROMACS development team,
 + * check out http://www.gromacs.org for more information.
 + *
 + * This program is free software; you can redistribute it and/or
 + * modify it under the terms of the GNU General Public License
 + * as published by the Free Software Foundation; either version 2
 + * of the License, or (at your option) any later version.
 + *
 + * If you want to redistribute modifications, please consider that
 + * scientific software is very special. Version control is crucial -
 + * bugs must be traceable. We will be happy to consider code for
 + * inclusion in the official distribution, but derived work must not
 + * be called official GROMACS. Details are found in the README & COPYING
 + * files - if they are missing, get the official version at www.gromacs.org.
 + *
 + * To help us fund GROMACS development, we humbly ask that you cite
 + * the papers on the package - you can find them in the top README file.
 + *
 + * For more info, check our website at http://www.gromacs.org
 + *
 + * And Hey:
 + * Gallium Rubidium Oxygen Manganese Argon Carbon Silicon
 + */
 +
 +#ifndef _nbnxn_internal_h
 +#define _nbnxn_internal_h
 +
 +#include "typedefs.h"
 +#include "domdec.h"
 +#include "gmx_cyclecounter.h"
 +
 +#ifdef __cplusplus
 +extern "C" {
 +#endif
 +
 +
 +#ifdef GMX_X86_SSE2
++/* Always use 4-way SIMD for single-precision bounding box calculations */
++#define NBNXN_SEARCH_BB_SSE
++#endif
++
++
++#ifdef GMX_NBNXN_SIMD
++/* Memory alignment in bytes as required by SIMD aligned loads/stores */
++#define NBNXN_MEM_ALIGN  (GMX_NBNXN_SIMD_BITWIDTH/8)
++#else
++/* No alignment required, but set it so we can call the same routines */
++#define NBNXN_MEM_ALIGN  32
 +#endif
 +
 +
 +/* A pair-search grid struct for one domain decomposition zone */
 +typedef struct {
 +    rvec c0;             /* The lower corner of the (local) grid        */
 +    rvec c1;             /* The upper corner of the (local) grid        */
 +    real atom_density;   /* The atom number density for the local grid  */
 +
 +    gmx_bool bSimple;    /* Is this grid simple or super/sub            */
 +    int  na_c;           /* Number of atoms per cluster                 */
 +    int  na_cj;          /* Number of atoms for list j-clusters         */
 +    int  na_sc;          /* Number of atoms per super-cluster           */
 +    int  na_c_2log;      /* 2log of na_c                                */
 +
 +    int  ncx;            /* Number of (super-)cells along x             */
 +    int  ncy;            /* Number of (super-)cells along y             */
 +    int  nc;             /* Total number of (super-)cells               */
 +
 +    real sx;             /* x-size of a (super-)cell                    */
 +    real sy;             /* y-size of a (super-)cell                    */
 +    real inv_sx;         /* 1/sx                                        */
 +    real inv_sy;         /* 1/sy                                        */
 +
 +    int  cell0;          /* Index in nbs->cell corresponding to cell 0  */
 +
 +    int  *cxy_na;        /* The number of atoms for each column in x,y  */
 +    int  *cxy_ind;       /* Grid (super)cell index, offset from cell0   */
 +    int  cxy_nalloc;     /* Allocation size for cxy_na and cxy_ind      */
 +
 +    int   *nsubc;        /* The number of sub cells for each super cell */
 +    float *bbcz;         /* Bounding boxes in z for the super cells     */
 +    float *bb;           /* 3D bounding boxes for the sub cells         */
 +    float *bbj;          /* 3D j-b.boxes for SSE-double or AVX-single   */
 +    int   *flags;        /* Flag for the super cells                    */
 +    int   nc_nalloc;     /* Allocation size for the pointers above      */
 +
 +    float *bbcz_simple;  /* bbcz for simple grid converted from super   */
 +    float *bb_simple;    /* bb for simple grid converted from super     */
 +    int   *flags_simple; /* flags for simple grid converted from super  */
 +    int   nc_nalloc_simple; /* Allocation size for the pointers above   */
 +
 +    int  nsubc_tot;      /* Total number of subcells, used for printing */
 +} nbnxn_grid_t;
 +
 +#ifdef GMX_NBNXN_SIMD
 +#if GMX_NBNXN_SIMD_BITWIDTH == 128
 +#define GMX_MM128_HERE
 +#else
 +#if GMX_NBNXN_SIMD_BITWIDTH == 256
 +#define GMX_MM256_HERE
 +#else
 +#error "unsupported GMX_NBNXN_SIMD_BITWIDTH"
 +#endif
 +#endif
 +#include "gmx_simd_macros.h"
 +
 +typedef struct nbnxn_x_ci_simd_4xn {
 +    /* The i-cluster coordinates for simple search */
 +    gmx_mm_pr ix_SSE0,iy_SSE0,iz_SSE0;
 +    gmx_mm_pr ix_SSE1,iy_SSE1,iz_SSE1;
 +    gmx_mm_pr ix_SSE2,iy_SSE2,iz_SSE2;
 +    gmx_mm_pr ix_SSE3,iy_SSE3,iz_SSE3;
 +} nbnxn_x_ci_simd_4xn_t;
 +
 +typedef struct nbnxn_x_ci_simd_2xnn {
 +    /* The i-cluster coordinates for simple search */
 +    gmx_mm_pr ix_SSE0,iy_SSE0,iz_SSE0;
 +    gmx_mm_pr ix_SSE2,iy_SSE2,iz_SSE2;
 +} nbnxn_x_ci_simd_2xnn_t;
 +
 +#endif
 +
 +/* Working data for the actual i-supercell during pair search */
 +typedef struct nbnxn_list_work {
 +    gmx_cache_protect_t cp0; /* Protect cache between threads               */
 +
 +    float *bb_ci;      /* The bounding boxes, pbc shifted, for each cluster */
 +    real  *x_ci;       /* The coordinates, pbc shifted, for each atom       */
 +#ifdef GMX_NBNXN_SIMD
 +    nbnxn_x_ci_simd_4xn_t *x_ci_simd_4xn;
 +    nbnxn_x_ci_simd_2xnn_t *x_ci_simd_2xnn;
 +#endif
 +    int  cj_ind;       /* The current cj_ind index for the current list     */
 +    int  cj4_init;     /* The first uninitialized cj4 block                 */
 +
 +    float *d2;         /* Bounding box distance work array                  */
 +
 +    nbnxn_cj_t *cj;    /* The j-cell list                                   */
 +    int  cj_nalloc;    /* Allocation size of cj                             */
 +
 +    int ncj_noq;       /* Nr. of cluster pairs without Coul for flop count  */
 +    int ncj_hlj;       /* Nr. of cluster pairs with 1/2 LJ for flop count   */
 +
 +    gmx_cache_protect_t cp1; /* Protect cache between threads               */
 +} nbnxn_list_work_t;
 +
 +/* Function type for setting the i-atom coordinate working data */
 +typedef void
 +gmx_icell_set_x_t(int ci,
 +                  real shx,real shy,real shz,
 +                  int na_c,
 +                  int stride,const real *x,
 +                  nbnxn_list_work_t *work);
 +
 +static gmx_icell_set_x_t icell_set_x_simple;
 +#ifdef GMX_NBNXN_SIMD
 +static gmx_icell_set_x_t icell_set_x_simple_simd_4xn;
 +static gmx_icell_set_x_t icell_set_x_simple_simd_2xnn;
 +#endif
 +static gmx_icell_set_x_t icell_set_x_supersub;
 +#ifdef NBNXN_SEARCH_SSE
 +static gmx_icell_set_x_t icell_set_x_supersub_sse8;
 +#endif
 +
 +#undef GMX_MM128_HERE
 +#undef GMX_MM256_HERE
 +
 +/* Local cycle count struct for profiling */
 +typedef struct {
 +    int          count;
 +    gmx_cycles_t c;
 +    gmx_cycles_t start;
 +} nbnxn_cycle_t;
 +
 +/* Local cycle count enum for profiling */
 +enum { enbsCCgrid, enbsCCsearch, enbsCCcombine, enbsCCreducef, enbsCCnr };
 +
 +/* Thread-local work struct, contains part of nbnxn_grid_t */
 +typedef struct {
 +    gmx_cache_protect_t cp0;
 +
 +    int *cxy_na;
 +    int cxy_na_nalloc;
 +
 +    int  *sort_work;
 +    int  sort_work_nalloc;
 +
 +    nbnxn_buffer_flags_t buffer_flags; /* Flags for force buffer access */
 +
 +    int  ndistc;         /* Number of distance checks for flop counting */
 +
 +    nbnxn_cycle_t cc[enbsCCnr];
 +
 +    gmx_cache_protect_t cp1;
 +} nbnxn_search_work_t;
 +
 +/* Main pair-search struct, contains the grid(s), not the pair-list(s) */
 +typedef struct nbnxn_search {
 +    int  ePBC;            /* PBC type enum                              */
 +    matrix box;           /* The periodic unit-cell                     */
 +
 +    gmx_bool DomDec;      /* Are we doing domain decomposition?         */
 +    ivec dd_dim;          /* Are we doing DD in x,y,z?                  */
 +    gmx_domdec_zones_t *zones; /* The domain decomposition zones        */
 +
 +    int  ngrid;           /* The number of grids, equal to #DD-zones    */
 +    nbnxn_grid_t *grid;   /* Array of grids, size ngrid                 */
 +    int  *cell;           /* Actual allocated cell array for all grids  */
 +    int  cell_nalloc;     /* Allocation size of cell                    */
 +    int  *a;              /* Atom index for grid, the inverse of cell   */
 +    int  a_nalloc;        /* Allocation size of a                       */
 +
 +    int  natoms_local;    /* The local atoms run from 0 to natoms_local */
 +    int  natoms_nonlocal; /* The non-local atoms run from natoms_local
 +                           * to natoms_nonlocal */
 +
 +    gmx_bool print_cycles;
 +    int      search_count;
 +    nbnxn_cycle_t cc[enbsCCnr];
 +
 +    gmx_icell_set_x_t *icell_set_x; /* Function for setting i-coords    */
 +
 +    int  nthread_max;     /* Maximum number of threads for pair-search  */
 +    nbnxn_search_work_t *work; /* Work array, size nthread_max          */
 +} nbnxn_search_t_t;
 +
 +
 +static void nbs_cycle_start(nbnxn_cycle_t *cc)
 +{
 +    cc->start = gmx_cycles_read();
 +}
 +
 +static void nbs_cycle_stop(nbnxn_cycle_t *cc)
 +{
 +    cc->c += gmx_cycles_read() - cc->start;
 +    cc->count++;
 +}
 +
 +
 +#ifdef __cplusplus
 +}
 +#endif
 +
 +#endif
index f915050caf92cbe35a2ebd0e607138cc18cd8a9e,0000000000000000000000000000000000000000..dc43b39f05858802f2307de0961f95e0e932ca99
mode 100644,000000..100644
--- /dev/null
@@@ -1,377 -1,0 +1,377 @@@
-             for(jm=0; jm<4; jm++)
 +/* -*- mode: c; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4; c-file-style: "stroustrup"; -*-
 + *
 + * 
 + *                This source code is part of
 + * 
 + *                 G   R   O   M   A   C   S
 + * 
 + *          GROningen MAchine for Chemical Simulations
 + * 
 + *                        VERSION 3.2.0
 + * Written by David van der Spoel, Erik Lindahl, Berk Hess, and others.
 + * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 + * Copyright (c) 2001-2004, The GROMACS development team,
 + * check out http://www.gromacs.org for more information.
 +
 + * This program is free software; you can redistribute it and/or
 + * modify it under the terms of the GNU General Public License
 + * as published by the Free Software Foundation; either version 2
 + * of the License, or (at your option) any later version.
 + * 
 + * If you want to redistribute modifications, please consider that
 + * scientific software is very special. Version control is crucial -
 + * bugs must be traceable. We will be happy to consider code for
 + * inclusion in the official distribution, but derived work must not
 + * be called official GROMACS. Details are found in the README & COPYING
 + * files - if they are missing, get the official version at www.gromacs.org.
 + * 
 + * To help us fund GROMACS development, we humbly ask that you cite
 + * the papers on the package - you can find them in the top README file.
 + * 
 + * For more info, check our website at http://www.gromacs.org
 + * 
 + * And Hey:
 + * GROningen Mixture of Alchemy and Childrens' Stories
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +
 +#include "types/simple.h"
 +#include "maths.h"
 +#include "vec.h"
 +#include "typedefs.h"
 +#include "force.h"
 +#include "nbnxn_kernel_gpu_ref.h"
 +#include "../nbnxn_consts.h"
 +#include "nbnxn_kernel_common.h"
 +
 +#define NCL_PER_SUPERCL         (NBNXN_GPU_NCLUSTER_PER_SUPERCLUSTER)
 +#define CL_SIZE                 (NBNXN_GPU_CLUSTER_SIZE)
 +
 +void
 +nbnxn_kernel_gpu_ref(const nbnxn_pairlist_t     *nbl,
 +                     const nbnxn_atomdata_t     *nbat,
 +                     const interaction_const_t  *iconst,
 +                     rvec                       *shift_vec,
 +                     int                        force_flags,
 +                     int                        clearF,
 +                     real *                     f,
 +                     real *                     fshift,
 +                     real *                     Vc,
 +                     real *                     Vvdw)
 +{
 +    const nbnxn_sci_t *nbln;
 +    const real    *x;
 +    gmx_bool      bEner;
 +    gmx_bool      bEwald;
 +    const real    *Ftab=NULL;
 +    real          rcut2,rvdw2,rlist2;
 +    int           ntype;
 +    real          facel;
 +    int           n;
 +    int           ish3;
 +    int           sci;
 +    int           cj4_ind0,cj4_ind1,cj4_ind;
 +    int           ci,cj;
 +    int           ic,jc,ia,ja,is,ifs,js,jfs,im,jm;
 +    int           n0;
 +    int           ggid;
 +    real          shX,shY,shZ;
 +    real          fscal,tx,ty,tz;
 +    real          rinvsq;
 +    real          iq;
 +    real          qq,vcoul=0,krsq,vctot;
 +    int           nti;
 +    int           tj;
 +    real          rt,r,eps;
 +    real          rinvsix;
 +    real          Vvdwtot;
 +    real          Vvdw_rep,Vvdw_disp;
 +    real          ix,iy,iz,fix,fiy,fiz;
 +    real          jx,jy,jz;
 +    real          dx,dy,dz,rsq,rinv;
 +    int           int_bit;
 +    real          fexcl;
 +    real          c6,c12,cexp1,cexp2,br;
 +    const real *  shiftvec;
 +    real *        vdwparam;
 +    int *         shift;
 +    int *         type;
 +    const nbnxn_excl_t *excl[2];
 +
 +    int           npair_tot,npair;
 +    int           nhwu,nhwu_pruned;
 +
 +    if (nbl->na_ci != CL_SIZE)
 +    {
 +        gmx_fatal(FARGS,"The neighborlist cluster size in the GPU reference kernel is %d, expected it to be %d",nbl->na_ci,CL_SIZE);
 +    }
 +
 +    if (clearF == enbvClearFYes)
 +    {
 +        clear_f(nbat, 0, f);
 +    }
 +
 +    bEner = (force_flags & GMX_FORCE_ENERGY);
 +
 +    bEwald = EEL_FULL(iconst->eeltype);
 +    if (bEwald)
 +    {
 +        Ftab = iconst->tabq_coul_F;
 +    }
 +
 +    rcut2               = iconst->rcoulomb*iconst->rcoulomb;
 +    rvdw2               = iconst->rvdw*iconst->rvdw;
 +
 +    rlist2              = nbl->rlist*nbl->rlist;
 +
 +    type                = nbat->type;
 +    facel               = iconst->epsfac;
 +    shiftvec            = shift_vec[0];
 +    vdwparam            = nbat->nbfp;
 +    ntype               = nbat->ntype;
 +
 +    x = nbat->x;
 +
 +    npair_tot   = 0;
 +    nhwu        = 0;
 +    nhwu_pruned = 0;
 +
 +    for(n=0; n<nbl->nsci; n++)
 +    {
 +        nbln = &nbl->sci[n];
 +
 +        ish3             = 3*nbln->shift;     
 +        shX              = shiftvec[ish3];  
 +        shY              = shiftvec[ish3+1];
 +        shZ              = shiftvec[ish3+2];
 +        cj4_ind0         = nbln->cj4_ind_start;      
 +        cj4_ind1         = nbln->cj4_ind_end;    
 +        sci              = nbln->sci;
 +        vctot            = 0;              
 +        Vvdwtot          = 0;              
 +
 +        if (nbln->shift == CENTRAL &&
 +            nbl->cj4[cj4_ind0].cj[0] == sci*NCL_PER_SUPERCL)
 +        {
 +            /* we have the diagonal:
 +             * add the charge self interaction energy term
 +             */
 +            for(im=0; im<NCL_PER_SUPERCL; im++)
 +            {
 +                ci = sci*NCL_PER_SUPERCL + im;
 +                for (ic=0; ic<CL_SIZE; ic++)
 +                {
 +                    ia     = ci*CL_SIZE + ic;
 +                    iq     = x[ia*nbat->xstride+3];
 +                    vctot += iq*iq;
 +                }
 +            }
 +            if (!bEwald)
 +            {
 +                vctot *= -facel*0.5*iconst->c_rf;
 +            }
 +            else
 +            {
 +                /* last factor 1/sqrt(pi) */
 +                vctot *= -facel*iconst->ewaldcoeff*M_1_SQRTPI;
 +            }
 +        }
 +        
 +        for(cj4_ind=cj4_ind0; (cj4_ind<cj4_ind1); cj4_ind++)
 +        {
 +            excl[0]           = &nbl->excl[nbl->cj4[cj4_ind].imei[0].excl_ind];
 +            excl[1]           = &nbl->excl[nbl->cj4[cj4_ind].imei[1].excl_ind];
 +
++            for(jm=0; jm<NBNXN_GPU_JGROUP_SIZE; jm++)
 +            {
 +                cj               = nbl->cj4[cj4_ind].cj[jm];
 +
 +                for(im=0; im<NCL_PER_SUPERCL; im++)
 +                {
 +                    /* We're only using the first imask,
 +                     * but here imei[1].imask is identical.
 +                     */
 +                    if ((nbl->cj4[cj4_ind].imei[0].imask >> (jm*NCL_PER_SUPERCL+im)) & 1)
 +                    {
 +                        gmx_bool within_rlist;
 +
 +                        ci               = sci*NCL_PER_SUPERCL + im;
 +
 +                        within_rlist     = FALSE;
 +                        npair            = 0;
 +                        for(ic=0; ic<CL_SIZE; ic++)
 +                        {
 +                            ia               = ci*CL_SIZE + ic;
 +                    
 +                            is               = ia*nbat->xstride;
 +                            ifs              = ia*nbat->fstride;
 +                            ix               = shX + x[is+0];
 +                            iy               = shY + x[is+1];
 +                            iz               = shZ + x[is+2];
 +                            iq               = facel*x[is+3];
 +                            nti              = ntype*2*type[ia];
 +                    
 +                            fix              = 0;
 +                            fiy              = 0;
 +                            fiz              = 0;
 +
 +                            for(jc=0; jc<CL_SIZE; jc++)
 +                            {
 +                                ja               = cj*CL_SIZE + jc;
 +
 +                                if (nbln->shift == CENTRAL &&
 +                                    ci == cj && ja <= ia)
 +                                {
 +                                    continue;
 +                                }
 +                        
 +                                int_bit = ((excl[jc>>2]->pair[(jc & 3)*CL_SIZE+ic] >> (jm*NCL_PER_SUPERCL+im)) & 1); 
 +
 +                                js               = ja*nbat->xstride;
 +                                jfs              = ja*nbat->fstride;
 +                                jx               = x[js+0];      
 +                                jy               = x[js+1];      
 +                                jz               = x[js+2];      
 +                                dx               = ix - jx;      
 +                                dy               = iy - jy;      
 +                                dz               = iz - jz;      
 +                                rsq              = dx*dx + dy*dy + dz*dz;
 +                                if (rsq < rlist2)
 +                                {
 +                                    within_rlist = TRUE;
 +                                }
 +                                if (rsq >= rcut2)
 +                                {
 +                                    continue;
 +                                }
 +
 +                                if (type[ia] != ntype-1 && type[ja] != ntype-1)
 +                                {
 +                                    npair++;
 +                                }
 +
 +                                /* avoid NaN for excluded pairs at r=0 */
 +                                rsq             += (1.0 - int_bit)*NBNXN_AVOID_SING_R2_INC;
 +
 +                                rinv             = gmx_invsqrt(rsq);
 +                                rinvsq           = rinv*rinv;  
 +                                fscal            = 0;
 +                        
 +                                qq               = iq*x[js+3];
 +                                if (!bEwald)
 +                                {
 +                                    /* Reaction-field */
 +                                    krsq  = iconst->k_rf*rsq;
 +                                    fscal = qq*(int_bit*rinv - 2*krsq)*rinvsq;
 +                                    if (bEner)
 +                                    {
 +                                        vcoul = qq*(int_bit*rinv + krsq - iconst->c_rf);
 +                                    }
 +                                }
 +                                else
 +                                {
 +                                    r     = rsq*rinv;
 +                                    rt    = r*iconst->tabq_scale;
 +                                    n0    = rt;
 +                                    eps   = rt - n0;
 +
 +                                    fexcl = (1 - eps)*Ftab[n0] + eps*Ftab[n0+1];
 +
 +                                    fscal = qq*(int_bit*rinvsq - fexcl)*rinv;
 +
 +                                    if (bEner)
 +                                    {
 +                                        vcoul = qq*((int_bit - gmx_erf(iconst->ewaldcoeff*r))*rinv - int_bit*iconst->sh_ewald);
 +                                    }
 +                                }
 +
 +                                if (rsq < rvdw2)
 +                                {
 +                                    tj        = nti + 2*type[ja];
 +
 +                                    /* Vanilla Lennard-Jones cutoff */
 +                                    c6        = vdwparam[tj];
 +                                    c12       = vdwparam[tj+1];
 +                                
 +                                    rinvsix   = int_bit*rinvsq*rinvsq*rinvsq;
 +                                    Vvdw_disp = c6*rinvsix;     
 +                                    Vvdw_rep  = c12*rinvsix*rinvsix;
 +                                    fscal    += (Vvdw_rep - Vvdw_disp)*rinvsq;
 +
 +                                    if (bEner)
 +                                    {
 +                                        vctot   += vcoul;
 +
 +                                        Vvdwtot +=
 +                                            (Vvdw_rep - int_bit*c12*iconst->sh_invrc6*iconst->sh_invrc6)/12 -
 +                                            (Vvdw_disp - int_bit*c6*iconst->sh_invrc6)/6;
 +                                    }
 +                                }
 +                                
 +                                tx        = fscal*dx;
 +                                ty        = fscal*dy;
 +                                tz        = fscal*dz;
 +                                fix       = fix + tx;
 +                                fiy       = fiy + ty;
 +                                fiz       = fiz + tz;
 +                                f[jfs+0] -= tx;
 +                                f[jfs+1] -= ty;
 +                                f[jfs+2] -= tz;
 +                            }
 +                            
 +                            f[ifs+0]        += fix;
 +                            f[ifs+1]        += fiy;
 +                            f[ifs+2]        += fiz;
 +                            fshift[ish3]     = fshift[ish3]   + fix;
 +                            fshift[ish3+1]   = fshift[ish3+1] + fiy;
 +                            fshift[ish3+2]   = fshift[ish3+2] + fiz;
 +
 +                            /* Count in half work-units.
 +                             * In CUDA one work-unit is 2 warps.
 +                             */
 +                            if ((ic+1) % (CL_SIZE/2) == 0)
 +                            {
 +                                npair_tot += npair;
 +
 +                                nhwu++;
 +                                if (within_rlist)
 +                                {
 +                                    nhwu_pruned++;
 +                                }
 +
 +                                within_rlist = FALSE;
 +                                npair        = 0;
 +                            }
 +                        }
 +                    }
 +                }
 +            }
 +        }
 +        
 +        if (bEner)
 +        {
 +            ggid = 0;
 +            Vc[ggid]         = Vc[ggid]   + vctot;
 +            Vvdw[ggid]       = Vvdw[ggid] + Vvdwtot;
 +        }
 +    }
 +
 +    if (debug)
 +    {
 +        fprintf(debug,"number of half %dx%d atom pairs: %d after pruning: %d fraction %4.2f\n",
 +                nbl->na_ci,nbl->na_ci,
 +                nhwu,nhwu_pruned,nhwu_pruned/(double)nhwu);
 +        fprintf(debug,"generic kernel pair interactions:            %d\n",
 +                nhwu*nbl->na_ci/2*nbl->na_ci);
 +        fprintf(debug,"generic kernel post-prune pair interactions: %d\n",
 +                nhwu_pruned*nbl->na_ci/2*nbl->na_ci);
 +        fprintf(debug,"generic kernel non-zero pair interactions:   %d\n",
 +                npair_tot);
 +        fprintf(debug,"ratio non-zero/post-prune pair interactions: %4.2f\n",
 +                npair_tot/(double)(nhwu_pruned*nbl->na_ci/2*nbl->na_ci));
 +    }
 +}
index f248d3c121680ff695c40af6390343a26905589e,0000000000000000000000000000000000000000..08d8bc640ed1dfc4f79abeb357f63e53b9141fa3
mode 100644,000000..100644
--- /dev/null
@@@ -1,365 -1,0 +1,371 @@@
-     gmx_bool   half_LJ,do_coul;
 +/* -*- mode: c; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4; c-file-style: "stroustrup"; -*-
 + *
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 + * Copyright (c) 2001-2009, The GROMACS Development Team
 + *
 + * Gromacs is a library for molecular simulation and trajectory analysis,
 + * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 + * a full list of developers and information, check out http://www.gromacs.org
 + *
 + * This program is free software; you can redistribute it and/or modify it under 
 + * the terms of the GNU Lesser General Public License as published by the Free 
 + * Software Foundation; either version 2 of the License, or (at your option) any 
 + * later version.
 + * As a special exception, you may use this file as part of a free software
 + * library without restriction.  Specifically, if other files instantiate
 + * templates or use macros or inline functions from this file, or you compile
 + * this file and link it with other files to produce an executable, this
 + * file does not by itself cause the resulting executable to be covered by
 + * the GNU Lesser General Public License.  
 + *
 + * In plain-speak: do not worry about classes/macros/templates either - only
 + * changes to the library have to be LGPL, not an application linking with it.
 + *
 + * To help fund GROMACS development, we humbly ask that you cite
 + * the papers people have written on it - you can find them on the website!
 + */
 +
 +#define UNROLLI    NBNXN_CPU_CLUSTER_I_SIZE
 +#define UNROLLJ    NBNXN_CPU_CLUSTER_I_SIZE
 +
 +/* We could use nbat->xstride and nbat->fstride, but macros might be faster */
 +#define X_STRIDE   3
 +#define F_STRIDE   3
 +/* Local i-atom buffer strides */
 +#define XI_STRIDE  3
 +#define FI_STRIDE  3
 +
 +
 +/* All functionality defines are set here, except for:
 + * CALC_ENERGIES, ENERGY_GROUPS which are defined before.
 + * CHECK_EXCLS, which is set just before including the inner loop contents.
 + */
 +
 +/* We always calculate shift forces, because it's cheap anyhow */
 +#define CALC_SHIFTFORCES
 +
 +#ifdef CALC_COUL_RF
 +#define NBK_FUNC_NAME(x,y) x##_rf_##y
 +#endif
 +#ifdef CALC_COUL_TAB
 +#ifndef VDW_CUTOFF_CHECK
 +#define NBK_FUNC_NAME(x,y) x##_tab_##y
 +#else
 +#define NBK_FUNC_NAME(x,y) x##_tab_twin_##y
 +#endif
 +#endif
 +
 +static void
 +#ifndef CALC_ENERGIES
 +NBK_FUNC_NAME(nbnxn_kernel_ref,noener)
 +#else
 +#ifndef ENERGY_GROUPS
 +NBK_FUNC_NAME(nbnxn_kernel_ref,ener)
 +#else
 +NBK_FUNC_NAME(nbnxn_kernel_ref,energrp)
 +#endif
 +#endif
 +#undef NBK_FUNC_NAME
 +                            (const nbnxn_pairlist_t     *nbl,
 +                             const nbnxn_atomdata_t     *nbat,
 +                             const interaction_const_t  *ic,
 +                             rvec                       *shift_vec,
 +                             real                       *f
 +#ifdef CALC_SHIFTFORCES
 +                             ,
 +                             real                       *fshift
 +#endif
 +#ifdef CALC_ENERGIES
 +                             ,
 +                             real                       *Vvdw,
 +                             real                       *Vc
 +#endif
 +                            )
 +{
 +    const nbnxn_ci_t   *nbln;
 +    const nbnxn_cj_t   *l_cj;
 +    const int          *type;
 +    const real         *q;
 +    const real         *shiftvec;
 +    const real         *x;
 +    const real         *nbfp;
 +    real       rcut2;
 +#ifdef VDW_CUTOFF_CHECK
 +    real       rvdw2;
 +#endif
 +    int        ntype2;
 +    real       facel;
 +    real       *nbfp_i;
 +    int        n,ci,ci_sh;
 +    int        ish,ishf;
-         half_LJ = (nbln->shift & NBNXN_CI_HALF_LJ(0));
++    gmx_bool   do_LJ,half_LJ,do_coul;
 +    int        cjind0,cjind1,cjind;
 +    int        ip,jp;
 +
 +    real       xi[UNROLLI*XI_STRIDE];
 +    real       fi[UNROLLI*FI_STRIDE];
 +    real       qi[UNROLLI];
 +
 +#ifdef CALC_ENERGIES
 +#ifndef ENERGY_GROUPS
 +
 +    real       Vvdw_ci,Vc_ci;
 +#else
 +    int        egp_mask;
 +    int        egp_sh_i[UNROLLI];
 +#endif
 +    real       sh_invrc6;
 +#endif
 +
 +#ifdef CALC_COUL_RF
 +    real       k_rf2;
 +#ifdef CALC_ENERGIES
 +    real       k_rf,c_rf;
 +#endif
 +#endif
 +#ifdef CALC_COUL_TAB
 +    real       tabscale;
 +#ifdef CALC_ENERGIES
 +    real       halfsp;
 +#endif
 +#ifndef GMX_DOUBLE
 +    const real *tab_coul_FDV0;
 +#else
 +    const real *tab_coul_F;
 +    const real *tab_coul_V;
 +#endif
 +#endif
 +
 +    int ninner;
 +
 +#ifdef COUNT_PAIRS
 +    int npair=0;
 +#endif
 +
 +#ifdef CALC_ENERGIES
 +    sh_invrc6 = ic->sh_invrc6;
 +#endif
 +
 +#ifdef CALC_COUL_RF
 +    k_rf2 = 2*ic->k_rf;
 +#ifdef CALC_ENERGIES
 +    k_rf = ic->k_rf;
 +    c_rf = ic->c_rf;
 +#endif
 +#endif
 +#ifdef CALC_COUL_TAB
 +    tabscale = ic->tabq_scale;
 +#ifdef CALC_ENERGIES
 +    halfsp = 0.5/ic->tabq_scale;
 +#endif
 +
 +#ifndef GMX_DOUBLE
 +    tab_coul_FDV0 = ic->tabq_coul_FDV0;
 +#else
 +    tab_coul_F    = ic->tabq_coul_F;
 +    tab_coul_V    = ic->tabq_coul_V;
 +#endif
 +#endif
 +
 +#ifdef ENERGY_GROUPS
 +    egp_mask = (1<<nbat->neg_2log) - 1;
 +#endif
 +
 +
 +    rcut2               = ic->rcoulomb*ic->rcoulomb;
 +#ifdef VDW_CUTOFF_CHECK
 +    rvdw2               = ic->rvdw*ic->rvdw;
 +#endif
 +
 +    ntype2              = nbat->ntype*2;
 +    nbfp                = nbat->nbfp;
 +    q                   = nbat->q;
 +    type                = nbat->type;
 +    facel               = ic->epsfac;
 +    shiftvec            = shift_vec[0];
 +    x                   = nbat->x;
 +
 +    l_cj = nbl->cj;
 +
 +    ninner = 0;
 +    for(n=0; n<nbl->nci; n++)
 +    {
 +        int i,d;
 +
 +        nbln = &nbl->ci[n];
 +
 +        ish              = (nbln->shift & NBNXN_CI_SHIFT);
 +        /* x, f and fshift are assumed to be stored with stride 3 */
 +        ishf             = ish*DIM;
 +        cjind0           = nbln->cj_ind_start;
 +        cjind1           = nbln->cj_ind_end;
 +        /* Currently only works super-cells equal to sub-cells */
 +        ci               = nbln->ci;
 +        ci_sh            = (ish == CENTRAL ? ci : -1);
 +
-         /* With half_LJ we currently always calculate Coulomb interactions */
-         if (do_coul || half_LJ)
++        /* We have 5 LJ/C combinations, but use only three inner loops,
++         * as the other combinations are unlikely and/or not much faster:
++         * inner half-LJ + C for half-LJ + C / no-LJ + C
++         * inner LJ + C      for full-LJ + C
++         * inner LJ          for full-LJ + no-C / half-LJ + no-C
++         */
++        do_LJ   = (nbln->shift & NBNXN_CI_DO_LJ(0));
 +        do_coul = (nbln->shift & NBNXN_CI_DO_COUL(0));
++        half_LJ = ((nbln->shift & NBNXN_CI_HALF_LJ(0)) || !do_LJ) && do_coul;
 +
 +#ifdef CALC_ENERGIES
 +#ifndef ENERGY_GROUPS
 +        Vvdw_ci = 0;
 +        Vc_ci   = 0;
 +#else
 +        for(i=0; i<UNROLLI; i++)
 +        {
 +            egp_sh_i[i] = ((nbat->energrp[ci]>>(i*nbat->neg_2log)) & egp_mask)*nbat->nenergrp;
 +        }
 +#endif
 +#endif
 +
 +        for(i=0; i<UNROLLI; i++)
 +        {
 +            for(d=0; d<DIM; d++)
 +            {
 +                xi[i*XI_STRIDE+d] = x[(ci*UNROLLI+i)*X_STRIDE+d] + shiftvec[ishf+d];
 +                fi[i*FI_STRIDE+d] = 0;
 +            }
 +        }
 +
++        if (do_coul)
 +        {
 +#ifdef CALC_ENERGIES
 +            real Vc_sub_self;
 +
 +#ifdef CALC_COUL_RF
 +            Vc_sub_self = 0.5*c_rf;
 +#endif
 +#ifdef CALC_COUL_TAB
 +#ifdef GMX_DOUBLE
 +            Vc_sub_self = 0.5*tab_coul_V[0];
 +#else
 +            Vc_sub_self = 0.5*tab_coul_FDV0[2];
 +#endif
 +#endif
 +#endif
 +
 +            for(i=0; i<UNROLLI; i++)
 +            {
 +                qi[i] = facel*q[ci*UNROLLI+i];
 +
 +#ifdef CALC_ENERGIES
 +                if (l_cj[nbln->cj_ind_start].cj == ci_sh)
 +                {
 +#ifdef ENERGY_GROUPS
 +                    Vc[egp_sh_i[i]+((nbat->energrp[ci]>>(i*nbat->neg_2log)) & egp_mask)]
 +#else
 +                    Vc[0]
 +#endif
 +                        -= qi[i]*q[ci*UNROLLI+i]*Vc_sub_self;
 +                }
 +#endif
 +            }
 +        }
 +
 +        cjind = cjind0;
 +        while (cjind < cjind1 && nbl->cj[cjind].excl != 0xffff)
 +        {
 +#define CHECK_EXCLS
 +            if (half_LJ)
 +            {
 +#define CALC_COULOMB
 +#define HALF_LJ
 +#include "nbnxn_kernel_ref_inner.h"
 +#undef HALF_LJ
 +#undef CALC_COULOMB
 +            }
 +            /* cppcheck-suppress duplicateBranch */
 +            else if (do_coul)
 +            {
 +#define CALC_COULOMB
 +#include "nbnxn_kernel_ref_inner.h"
 +#undef CALC_COULOMB
 +            }
 +            else
 +            {
 +#include "nbnxn_kernel_ref_inner.h"
 +            }
 +#undef CHECK_EXCLS
 +            cjind++;
 +        }
 +
 +        for(; (cjind<cjind1); cjind++)
 +        {
 +            if (half_LJ)
 +            {
 +#define CALC_COULOMB
 +#define HALF_LJ
 +#include "nbnxn_kernel_ref_inner.h"
 +#undef HALF_LJ
 +#undef CALC_COULOMB
 +            }
 +            /* cppcheck-suppress duplicateBranch */
 +            else if (do_coul)
 +            {
 +#define CALC_COULOMB
 +#include "nbnxn_kernel_ref_inner.h"
 +#undef CALC_COULOMB
 +            }
 +            else
 +            {
 +#include "nbnxn_kernel_ref_inner.h"
 +            }
 +        }
 +        ninner += cjind1 - cjind0;
 +
 +        /* Add accumulated i-forces to the force array */
 +        for(i=0; i<UNROLLI; i++)
 +        {
 +            for(d=0; d<DIM; d++)
 +            {
 +                f[(ci*UNROLLI+i)*F_STRIDE+d] += fi[i*FI_STRIDE+d];
 +            }
 +        }
 +#ifdef CALC_SHIFTFORCES
 +        if (fshift != NULL)
 +        {
 +            /* Add i forces to shifted force list */
 +            for(i=0; i<UNROLLI; i++)
 +            {
 +                for(d=0; d<DIM; d++)
 +                {
 +                    fshift[ishf+d] += fi[i*FI_STRIDE+d];
 +                }
 +            }
 +        }
 +#endif
 +
 +#ifdef CALC_ENERGIES
 +#ifndef ENERGY_GROUPS
 +        *Vvdw += Vvdw_ci;
 +        *Vc   += Vc_ci;
 +#endif
 +#endif
 +      }
 +
 +#ifdef COUNT_PAIRS
 +    printf("atom pairs %d\n",npair);
 +#endif
 +}
 +
 +#undef CALC_SHIFTFORCES
 +
 +#undef X_STRIDE
 +#undef F_STRIDE
 +#undef XI_STRIDE
 +#undef FI_STRIDE
 +
 +#undef UNROLLI
 +#undef UNROLLJ
index c0f08bd5614733664b035f7cce225a61265f2cac,0000000000000000000000000000000000000000..5b16fc06a7326e90b870e86a9aee0ee89260229b
mode 100644,000000..100644
--- /dev/null
@@@ -1,4942 -1,0 +1,5031 @@@
- #ifdef NBNXN_SEARCH_SSE
 +/* -*- mode: c; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4; c-file-style: "stroustrup"; -*-
 + *
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + *          GROningen MAchine for Chemical Simulations
 + *
 + * Written by David van der Spoel, Erik Lindahl, Berk Hess, and others.
 + * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 + * Copyright (c) 2001-2012, The GROMACS development team,
 + * check out http://www.gromacs.org for more information.
 +
 + * This program is free software; you can redistribute it and/or
 + * modify it under the terms of the GNU General Public License
 + * as published by the Free Software Foundation; either version 2
 + * of the License, or (at your option) any later version.
 + *
 + * If you want to redistribute modifications, please consider that
 + * scientific software is very special. Version control is crucial -
 + * bugs must be traceable. We will be happy to consider code for
 + * inclusion in the official distribution, but derived work must not
 + * be called official GROMACS. Details are found in the README & COPYING
 + * files - if they are missing, get the official version at www.gromacs.org.
 + *
 + * To help us fund GROMACS development, we humbly ask that you cite
 + * the papers on the package - you can find them in the top README file.
 + *
 + * For more info, check our website at http://www.gromacs.org
 + */
 +
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +#include <string.h>
 +#include "sysstuff.h"
 +#include "smalloc.h"
 +#include "macros.h"
 +#include "maths.h"
 +#include "vec.h"
 +#include "pbc.h"
 +#include "nbnxn_consts.h"
 +#include "nbnxn_internal.h"
 +#include "nbnxn_atomdata.h"
 +#include "nbnxn_search.h"
 +#include "gmx_cyclecounter.h"
 +#include "gmxfio.h"
 +#include "gmx_omp_nthreads.h"
 +#include "nrnb.h"
 +
 +
 +/* Pair search box lower and upper corner in x,y,z.
 + * Store this in 4 iso 3 reals, which is useful with SSE.
 + * To avoid complicating the code we also use 4 without SSE.
 + */
 +#define NNBSBB_C         4
 +#define NNBSBB_B         (2*NNBSBB_C)
 +/* Pair search box lower and upper bound in z only. */
 +#define NNBSBB_D         2
 +/* Pair search box lower and upper corner x,y,z indices */
 +#define BBL_X  0
 +#define BBL_Y  1
 +#define BBL_Z  2
 +#define BBU_X  4
 +#define BBU_Y  5
 +#define BBU_Z  6
 +
 +
- #if defined NBNXN_SEARCH_SSE_SINGLE && GPU_NSUBCELL == 8
- #define NBNXN_8BB_SSE
++#ifdef NBNXN_SEARCH_BB_SSE
++/* We use SSE or AVX-128bit for bounding box calculations */
 +
 +#ifndef GMX_DOUBLE
++/* Single precision BBs + coordinates, we can also load coordinates using SSE */
 +#define NBNXN_SEARCH_SSE_SINGLE
 +#endif
 +
 +/* Include basic SSE2 stuff */
 +#include <emmintrin.h>
 +
- #define STRIDE_8BB        4
- #define STRIDE_8BB_2LOG   2
++#if defined NBNXN_SEARCH_SSE_SINGLE && (GPU_NSUBCELL == 4 || GPU_NSUBCELL == 8)
++/* Store bounding boxes with x, y and z coordinates in packs of 4 */
++#define NBNXN_PBB_SSE
 +#endif
 +
 +/* The width of SSE/AVX128 with single precision for bounding boxes with GPU.
 + * Here AVX-256 turns out to be slightly slower than AVX-128.
 + */
- #endif /* NBNXN_SEARCH_SSE */
++#define STRIDE_PBB        4
++#define STRIDE_PBB_2LOG   2
 +
- #ifdef NBNXN_SEARCH_SSE
++#endif /* NBNXN_SEARCH_BB_SSE */
 +
 +#ifdef GMX_NBNXN_SIMD
 +
 +/* The functions below are macros as they are performance sensitive */
 +
 +/* 4x4 list, pack=4: no complex conversion required */
 +/* i-cluster to j-cluster conversion */
 +#define CI_TO_CJ_J4(ci)   (ci)
 +/* cluster index to coordinate array index conversion */
 +#define X_IND_CI_J4(ci)  ((ci)*STRIDE_P4)
 +#define X_IND_CJ_J4(cj)  ((cj)*STRIDE_P4)
 +
 +/* 4x2 list, pack=4: j-cluster size is half the packing width */
 +/* i-cluster to j-cluster conversion */
 +#define CI_TO_CJ_J2(ci)  ((ci)<<1)
 +/* cluster index to coordinate array index conversion */
 +#define X_IND_CI_J2(ci)  ((ci)*STRIDE_P4)
 +#define X_IND_CJ_J2(cj)  (((cj)>>1)*STRIDE_P4 + ((cj) & 1)*(PACK_X4>>1))
 +
 +/* 4x8 list, pack=8: i-cluster size is half the packing width */
 +/* i-cluster to j-cluster conversion */
 +#define CI_TO_CJ_J8(ci)  ((ci)>>1)
 +/* cluster index to coordinate array index conversion */
 +#define X_IND_CI_J8(ci)  (((ci)>>1)*STRIDE_P8 + ((ci) & 1)*(PACK_X8>>1))
 +#define X_IND_CJ_J8(cj)  ((cj)*STRIDE_P8)
 +
 +/* The j-cluster size is matched to the SIMD width */
 +#if GMX_NBNXN_SIMD_BITWIDTH == 128
 +#ifdef GMX_DOUBLE
 +#define CI_TO_CJ_SIMD_4XN(ci)  CI_TO_CJ_J2(ci)
 +#define X_IND_CI_SIMD_4XN(ci)  X_IND_CI_J2(ci)
 +#define X_IND_CJ_SIMD_4XN(cj)  X_IND_CJ_J2(cj)
 +#else
 +#define CI_TO_CJ_SIMD_4XN(ci)  CI_TO_CJ_J4(ci)
 +#define X_IND_CI_SIMD_4XN(ci)  X_IND_CI_J4(ci)
 +#define X_IND_CJ_SIMD_4XN(cj)  X_IND_CJ_J4(cj)
 +#endif
 +#else
 +#if GMX_NBNXN_SIMD_BITWIDTH == 256
 +#ifdef GMX_DOUBLE
 +#define CI_TO_CJ_SIMD_4XN(ci)  CI_TO_CJ_J4(ci)
 +#define X_IND_CI_SIMD_4XN(ci)  X_IND_CI_J4(ci)
 +#define X_IND_CJ_SIMD_4XN(cj)  X_IND_CJ_J4(cj)
 +#else
 +#define CI_TO_CJ_SIMD_4XN(ci)  CI_TO_CJ_J8(ci)
 +#define X_IND_CI_SIMD_4XN(ci)  X_IND_CI_J8(ci)
 +#define X_IND_CJ_SIMD_4XN(cj)  X_IND_CJ_J8(cj)
 +/* Half SIMD with j-cluster size */
 +#define CI_TO_CJ_SIMD_2XNN(ci) CI_TO_CJ_J4(ci)
 +#define X_IND_CI_SIMD_2XNN(ci) X_IND_CI_J4(ci)
 +#define X_IND_CJ_SIMD_2XNN(cj) X_IND_CJ_J4(cj)
 +#endif
 +#else
 +#error "unsupported GMX_NBNXN_SIMD_WIDTH"
 +#endif
 +#endif
 +
 +#endif /* GMX_NBNXN_SIMD */
 +
 +
 +/* Interaction masks for 4xN atom interactions.
 + * Bit i*CJ_SIZE + j tells if atom i and j interact.
 + */
 +/* All interaction mask is the same for all kernels */
 +#define NBNXN_INT_MASK_ALL        0xffffffff
 +/* 4x4 kernel diagonal mask */
 +#define NBNXN_INT_MASK_DIAG       0x08ce
 +/* 4x2 kernel diagonal masks */
 +#define NBNXN_INT_MASK_DIAG_J2_0  0x0002
 +#define NBNXN_INT_MASK_DIAG_J2_1  0x002F
 +/* 4x8 kernel diagonal masks */
 +#define NBNXN_INT_MASK_DIAG_J8_0  0xf0f8fcfe
 +#define NBNXN_INT_MASK_DIAG_J8_1  0x0080c0e0
 +
 +
- #define NNBSBB_XXXX      (NNBSBB_D*DIM*STRIDE_8BB)
++#ifdef NBNXN_SEARCH_BB_SSE
 +/* Store bounding boxes corners as quadruplets: xxxxyyyyzzzz */
 +#define NBNXN_BBXXXX
 +/* Size of bounding box corners quadruplet */
- #ifdef NBNXN_8BB_SSE
-         bb_nalloc = grid->nc_nalloc*GPU_NSUBCELL/STRIDE_8BB*NNBSBB_XXXX;
++#define NNBSBB_XXXX      (NNBSBB_D*DIM*STRIDE_PBB)
 +#endif
 +
 +/* We shift the i-particles backward for PBC.
 + * This leads to more conditionals than shifting forward.
 + * We do this to get more balanced pair lists.
 + */
 +#define NBNXN_SHIFT_BACKWARD
 +
 +
 +/* This define is a lazy way to avoid interdependence of the grid
 + * and searching data structures.
 + */
 +#define NBNXN_NA_SC_MAX (GPU_NSUBCELL*NBNXN_GPU_CLUSTER_SIZE)
 +
 +
 +static void nbs_cycle_clear(nbnxn_cycle_t *cc)
 +{
 +    int i;
 +
 +    for(i=0; i<enbsCCnr; i++)
 +    {
 +        cc[i].count = 0;
 +        cc[i].c     = 0;
 +    }
 +}
 +
 +static double Mcyc_av(const nbnxn_cycle_t *cc)
 +{
 +    return (double)cc->c*1e-6/cc->count;
 +}
 +
 +static void nbs_cycle_print(FILE *fp,const nbnxn_search_t nbs)
 +{
 +    int n;
 +    int t;
 +
 +    fprintf(fp,"\n");
 +    fprintf(fp,"ns %4d grid %4.1f search %4.1f red.f %5.3f",
 +            nbs->cc[enbsCCgrid].count,
 +            Mcyc_av(&nbs->cc[enbsCCgrid]),
 +            Mcyc_av(&nbs->cc[enbsCCsearch]),
 +            Mcyc_av(&nbs->cc[enbsCCreducef]));
 +
 +    if (nbs->nthread_max > 1)
 +    {
 +        if (nbs->cc[enbsCCcombine].count > 0)
 +        {
 +            fprintf(fp," comb %5.2f",
 +                    Mcyc_av(&nbs->cc[enbsCCcombine]));
 +        }
 +        fprintf(fp," s. th");
 +        for(t=0; t<nbs->nthread_max; t++)
 +        {
 +            fprintf(fp," %4.1f",
 +                    Mcyc_av(&nbs->work[t].cc[enbsCCsearch]));
 +        }
 +    }
 +    fprintf(fp,"\n");
 +}
 +
 +static void nbnxn_grid_init(nbnxn_grid_t * grid)
 +{
 +    grid->cxy_na      = NULL;
 +    grid->cxy_ind     = NULL;
 +    grid->cxy_nalloc  = 0;
 +    grid->bb          = NULL;
 +    grid->bbj         = NULL;
 +    grid->nc_nalloc   = 0;
 +}
 +
 +static int get_2log(int n)
 +{
 +    int log2;
 +
 +    log2 = 0;
 +    while ((1<<log2) < n)
 +    {
 +        log2++;
 +    }
 +    if ((1<<log2) != n)
 +    {
 +        gmx_fatal(FARGS,"nbnxn na_c (%d) is not a power of 2",n);
 +    }
 +
 +    return log2;
 +}
 +
 +static int nbnxn_kernel_to_ci_size(int nb_kernel_type)
 +{
 +    switch (nb_kernel_type)
 +    {
 +    case nbnxnk4x4_PlainC:
 +    case nbnxnk4xN_SIMD_4xN:
 +    case nbnxnk4xN_SIMD_2xNN:
 +        return NBNXN_CPU_CLUSTER_I_SIZE;
 +    case nbnxnk8x8x8_CUDA:
 +    case nbnxnk8x8x8_PlainC:
 +        /* The cluster size for super/sub lists is only set here.
 +         * Any value should work for the pair-search and atomdata code.
 +         * The kernels, of course, might require a particular value.
 +         */
 +        return NBNXN_GPU_CLUSTER_SIZE;
 +    default:
 +        gmx_incons("unknown kernel type");
 +    }
 +
 +    return 0;
 +}
 +
 +int nbnxn_kernel_to_cj_size(int nb_kernel_type)
 +{
 +    int nbnxn_simd_width=0;
 +    int cj_size=0;
 +
 +#ifdef GMX_NBNXN_SIMD
 +    nbnxn_simd_width = GMX_NBNXN_SIMD_BITWIDTH/(sizeof(real)*8);
 +#endif
 +
 +    switch (nb_kernel_type)
 +    {
 +    case nbnxnk4x4_PlainC:
 +        cj_size = NBNXN_CPU_CLUSTER_I_SIZE;
 +        break;
 +    case nbnxnk4xN_SIMD_4xN:
 +        cj_size = nbnxn_simd_width;
 +        break;
 +    case nbnxnk4xN_SIMD_2xNN:
 +        cj_size = nbnxn_simd_width/2;
 +        break;
 +    case nbnxnk8x8x8_CUDA:
 +    case nbnxnk8x8x8_PlainC:
 +        cj_size = nbnxn_kernel_to_ci_size(nb_kernel_type);
 +        break;
 +    default:
 +        gmx_incons("unknown kernel type");
 +    }
 +
 +    return cj_size;
 +}
 +
 +static int ci_to_cj(int na_cj_2log,int ci)
 +{
 +    switch (na_cj_2log)
 +    {
 +    case 2: return  ci;     break;
 +    case 1: return (ci<<1); break;
 +    case 3: return (ci>>1); break;
 +    }
 +
 +    return 0;
 +}
 +
 +gmx_bool nbnxn_kernel_pairlist_simple(int nb_kernel_type)
 +{
 +    if (nb_kernel_type == nbnxnkNotSet)
 +    {
 +        gmx_fatal(FARGS, "Non-bonded kernel type not set for Verlet-style pair-list.");
 +    }
 +
 +    switch (nb_kernel_type)
 +    {
 +    case nbnxnk8x8x8_CUDA:
 +    case nbnxnk8x8x8_PlainC:
 +        return FALSE;
 +
 +    case nbnxnk4x4_PlainC:
 +    case nbnxnk4xN_SIMD_4xN:
 +    case nbnxnk4xN_SIMD_2xNN:
 +        return TRUE;
 +
 +    default:
 +        gmx_incons("Invalid nonbonded kernel type passed!");
 +        return FALSE;
 +    }
 +}
 +
 +void nbnxn_init_search(nbnxn_search_t * nbs_ptr,
 +                       ivec *n_dd_cells,
 +                       gmx_domdec_zones_t *zones,
 +                       int nthread_max)
 +{
 +    nbnxn_search_t nbs;
 +    int d,g,t;
 +
 +    snew(nbs,1);
 +    *nbs_ptr = nbs;
 +
 +    nbs->DomDec = (n_dd_cells != NULL);
 +
 +    clear_ivec(nbs->dd_dim);
 +    nbs->ngrid = 1;
 +    if (nbs->DomDec)
 +    {
 +        nbs->zones = zones;
 +
 +        for(d=0; d<DIM; d++)
 +        {
 +            if ((*n_dd_cells)[d] > 1)
 +            {
 +                nbs->dd_dim[d] = 1;
 +                /* Each grid matches a DD zone */
 +                nbs->ngrid *= 2;
 +            }
 +        }
 +    }
 +
 +    snew(nbs->grid,nbs->ngrid);
 +    for(g=0; g<nbs->ngrid; g++)
 +    {
 +        nbnxn_grid_init(&nbs->grid[g]);
 +    }
 +    nbs->cell        = NULL;
 +    nbs->cell_nalloc = 0;
 +    nbs->a           = NULL;
 +    nbs->a_nalloc    = 0;
 +
 +    nbs->nthread_max = nthread_max;
 +
 +    /* Initialize the work data structures for each thread */
 +    snew(nbs->work,nbs->nthread_max);
 +    for(t=0; t<nbs->nthread_max; t++)
 +    {
 +        nbs->work[t].cxy_na           = NULL;
 +        nbs->work[t].cxy_na_nalloc    = 0;
 +        nbs->work[t].sort_work        = NULL;
 +        nbs->work[t].sort_work_nalloc = 0;
 +    }
 +
 +    /* Initialize detailed nbsearch cycle counting */
 +    nbs->print_cycles = (getenv("GMX_NBNXN_CYCLE") != 0);
 +    nbs->search_count = 0;
 +    nbs_cycle_clear(nbs->cc);
 +    for(t=0; t<nbs->nthread_max; t++)
 +    {
 +        nbs_cycle_clear(nbs->work[t].cc);
 +    }
 +}
 +
 +static real grid_atom_density(int n,rvec corner0,rvec corner1)
 +{
 +    rvec size;
 +
 +    rvec_sub(corner1,corner0,size);
 +
 +    return n/(size[XX]*size[YY]*size[ZZ]);
 +}
 +
 +static int set_grid_size_xy(const nbnxn_search_t nbs,
 +                            nbnxn_grid_t *grid,
++                            int dd_zone,
 +                            int n,rvec corner0,rvec corner1,
 +                            real atom_density,
 +                            int XFormat)
 +{
 +    rvec size;
 +    int  na_c;
 +    real adens,tlen,tlen_x,tlen_y,nc_max;
 +    int  t;
 +
 +    rvec_sub(corner1,corner0,size);
 +
 +    if (n > grid->na_sc)
 +    {
 +        /* target cell length */
 +        if (grid->bSimple)
 +        {
 +            /* To minimize the zero interactions, we should make
 +             * the largest of the i/j cell cubic.
 +             */
 +            na_c = max(grid->na_c,grid->na_cj);
 +
 +            /* Approximately cubic cells */
 +            tlen   = pow(na_c/atom_density,1.0/3.0);
 +            tlen_x = tlen;
 +            tlen_y = tlen;
 +        }
 +        else
 +        {
 +            /* Approximately cubic sub cells */
 +            tlen   = pow(grid->na_c/atom_density,1.0/3.0);
 +            tlen_x = tlen*GPU_NSUBCELL_X;
 +            tlen_y = tlen*GPU_NSUBCELL_Y;
 +        }
 +        /* We round ncx and ncy down, because we get less cell pairs
 +         * in the nbsist when the fixed cell dimensions (x,y) are
 +         * larger than the variable one (z) than the other way around.
 +         */
 +        grid->ncx = max(1,(int)(size[XX]/tlen_x));
 +        grid->ncy = max(1,(int)(size[YY]/tlen_y));
 +    }
 +    else
 +    {
 +        grid->ncx = 1;
 +        grid->ncy = 1;
 +    }
 +
++    grid->sx = size[XX]/grid->ncx;
++    grid->sy = size[YY]/grid->ncy;
++    grid->inv_sx = 1/grid->sx;
++    grid->inv_sy = 1/grid->sy;
++
++    if (dd_zone > 0)
++    {
++        /* This is a non-home zone, add an extra row of cells
++         * for particles communicated for bonded interactions.
++         * These can be beyond the cut-off. It doesn't matter where
++         * they end up on the grid, but for performance it's better
++         * if they don't end up in cells that can be within cut-off range.
++         */
++        grid->ncx++;
++        grid->ncy++;
++    }
++
 +    /* We need one additional cell entry for particles moved by DD */
 +    if (grid->ncx*grid->ncy+1 > grid->cxy_nalloc)
 +    {
 +        grid->cxy_nalloc = over_alloc_large(grid->ncx*grid->ncy+1);
 +        srenew(grid->cxy_na,grid->cxy_nalloc);
 +        srenew(grid->cxy_ind,grid->cxy_nalloc+1);
 +    }
 +    for(t=0; t<nbs->nthread_max; t++)
 +    {
 +        if (grid->ncx*grid->ncy+1 > nbs->work[t].cxy_na_nalloc)
 +        {
 +            nbs->work[t].cxy_na_nalloc = over_alloc_large(grid->ncx*grid->ncy+1);
 +            srenew(nbs->work[t].cxy_na,nbs->work[t].cxy_na_nalloc);
 +        }
 +    }
 +
 +    /* Worst case scenario of 1 atom in each last cell */
 +    if (grid->na_cj <= grid->na_c)
 +    {
 +        nc_max = n/grid->na_sc + grid->ncx*grid->ncy;
 +    }
 +    else
 +    {
 +        nc_max = n/grid->na_sc + grid->ncx*grid->ncy*grid->na_cj/grid->na_c;
 +    }
 +
 +    if (nc_max > grid->nc_nalloc)
 +    {
 +        int bb_nalloc;
 +
 +        grid->nc_nalloc = over_alloc_large(nc_max);
 +        srenew(grid->nsubc,grid->nc_nalloc);
 +        srenew(grid->bbcz,grid->nc_nalloc*NNBSBB_D);
-     grid->sx = size[XX]/grid->ncx;
-     grid->sy = size[YY]/grid->ncy;
-     grid->inv_sx = 1/grid->sx;
-     grid->inv_sy = 1/grid->sy;
++#ifdef NBNXN_PBB_SSE
++        bb_nalloc = grid->nc_nalloc*GPU_NSUBCELL/STRIDE_PBB*NNBSBB_XXXX;
 +#else
 +        bb_nalloc = grid->nc_nalloc*GPU_NSUBCELL*NNBSBB_B;
 +#endif
 +        sfree_aligned(grid->bb);
 +        /* This snew also zeros the contents, this avoid possible
 +         * floating exceptions in SSE with the unused bb elements.
 +         */
 +        snew_aligned(grid->bb,bb_nalloc,16);
 +
 +        if (grid->bSimple)
 +        {
 +            if (grid->na_cj == grid->na_c)
 +            {
 +                grid->bbj = grid->bb;
 +            }
 +            else
 +            {
 +                sfree_aligned(grid->bbj);
 +                snew_aligned(grid->bbj,bb_nalloc*grid->na_c/grid->na_cj,16);
 +            }
 +        }
 +
 +        srenew(grid->flags,grid->nc_nalloc);
 +    }
 +
 +    copy_rvec(corner0,grid->c0);
 +    copy_rvec(corner1,grid->c1);
- #define SORT_GRID_OVERSIZE 2
 +
 +    return nc_max;
 +}
 +
-     int zi,zim;
++/* We need to sort paricles in grid columns on z-coordinate.
++ * As particle are very often distributed homogeneously, we a sorting
++ * algorithm similar to pigeonhole sort. We multiply the z-coordinate
++ * by a factor, cast to an int and try to store in that hole. If the hole
++ * is full, we move this or another particle. A second pass is needed to make
++ * contiguous elements. SORT_GRID_OVERSIZE is the ratio of holes to particles.
++ * 4 is the optimal value for homogeneous particle distribution and allows
++ * for an O(#particles) sort up till distributions were all particles are
++ * concentrated in 1/4 of the space. No NlogN fallback is implemented,
++ * as it can be expensive to detect imhomogeneous particle distributions.
++ * SGSF is the maximum ratio of holes used, in the worst case all particles
++ * end up in the last hole and we need #particles extra holes at the end.
++ */
++#define SORT_GRID_OVERSIZE 4
 +#define SGSF (SORT_GRID_OVERSIZE + 1)
 +
++/* Sort particle index a on coordinates x along dim.
++ * Backwards tells if we want decreasing iso increasing coordinates.
++ * h0 is the minimum of the coordinate range.
++ * invh is the inverse hole spacing.
++ * nsort, the theortical hole limit, is only used for debugging.
++ * sort is the sorting work array.
++ */
 +static void sort_atoms(int dim,gmx_bool Backwards,
 +                       int *a,int n,rvec *x,
 +                       real h0,real invh,int nsort,int *sort)
 +{
 +    int i,c;
-     /* For small oversize factors clearing the whole area is fastest.
-      * For large oversize we should clear the used elements after use.
-      */
-     for(i=0; i<nsort; i++)
-     {
-         sort[i] = -1;
-     }
++    int zi,zim,zi_min,zi_max;
 +    int cp,tmp;
 +
 +    if (n <= 1)
 +    {
 +        /* Nothing to do */
 +        return;
 +    }
 +
-         for(zi=nsort-1; zi>=0; zi--)
++    /* Determine the index range used, so we can limit it for the second pass */
++    zi_min = INT_MAX;
++    zi_max = -1;
++
 +    /* Sort the particles using a simple index sort */
 +    for(i=0; i<n; i++)
 +    {
 +        /* The cast takes care of float-point rounding effects below zero.
 +         * This code assumes particles are less than 1/SORT_GRID_OVERSIZE
 +         * times the box height out of the box.
 +         */
 +        zi = (int)((x[a[i]][dim] - h0)*invh);
 +
 +#ifdef DEBUG_NBNXN_GRIDDING
 +        if (zi < 0 || zi >= nsort)
 +        {
 +            gmx_fatal(FARGS,"(int)((x[%d][%c]=%f - %f)*%f) = %d, not in 0 - %d\n",
 +                      a[i],'x'+dim,x[a[i]][dim],h0,invh,zi,nsort);
 +        }
 +#endif
 +
 +        /* Ideally this particle should go in sort cell zi,
 +         * but that might already be in use,
 +         * in that case find the first empty cell higher up
 +         */
 +        if (sort[zi] < 0)
 +        {
 +            sort[zi] = a[i];
++            zi_min = min(zi_min,zi);
++            zi_max = max(zi_max,zi);
 +        }
 +        else
 +        {
 +            /* We have multiple atoms in the same sorting slot.
 +             * Sort on real z for minimal bounding box size.
 +             * There is an extra check for identical z to ensure
 +             * well-defined output order, independent of input order
 +             * to ensure binary reproducibility after restarts.
 +             */
 +            while(sort[zi] >= 0 && ( x[a[i]][dim] >  x[sort[zi]][dim] ||
 +                                    (x[a[i]][dim] == x[sort[zi]][dim] &&
 +                                     a[i] > sort[zi])))
 +            {
 +                zi++;
 +            }
 +
 +            if (sort[zi] >= 0)
 +            {
 +                /* Shift all elements by one slot until we find an empty slot */
 +                cp = sort[zi];
 +                zim = zi + 1;
 +                while (sort[zim] >= 0)
 +                {
 +                    tmp = sort[zim];
 +                    sort[zim] = cp;
 +                    cp  = tmp;
 +                    zim++;
 +                }
 +                sort[zim] = cp;
++                zi_max = max(zi_max,zim);
 +            }
 +            sort[zi] = a[i];
++            zi_max = max(zi_max,zi);
 +        }
 +    }
 +
 +    c = 0;
 +    if (!Backwards)
 +    {
 +        for(zi=0; zi<nsort; zi++)
 +        {
 +            if (sort[zi] >= 0)
 +            {
 +                a[c++] = sort[zi];
++                sort[zi] = -1;
 +            }
 +        }
 +    }
 +    else
 +    {
- #ifdef NBNXN_SEARCH_SSE
++        for(zi=zi_max; zi>=zi_min; zi--)
 +        {
 +            if (sort[zi] >= 0)
 +            {
 +                a[c++] = sort[zi];
++                sort[zi] = -1;
 +            }
 +        }
 +    }
 +    if (c < n)
 +    {
 +        gmx_incons("Lost particles while sorting");
 +    }
 +}
 +
 +#ifdef GMX_DOUBLE
 +#define R2F_D(x) ((float)((x) >= 0 ? ((1-GMX_FLOAT_EPS)*(x)) : ((1+GMX_FLOAT_EPS)*(x))))
 +#define R2F_U(x) ((float)((x) >= 0 ? ((1+GMX_FLOAT_EPS)*(x)) : ((1-GMX_FLOAT_EPS)*(x))))
 +#else
 +#define R2F_D(x) (x)
 +#define R2F_U(x) (x)
 +#endif
 +
 +/* Coordinate order x,y,z, bb order xyz0 */
 +static void calc_bounding_box(int na,int stride,const real *x,float *bb)
 +{
 +    int  i,j;
 +    real xl,xh,yl,yh,zl,zh;
 +
 +    i = 0;
 +    xl = x[i+XX];
 +    xh = x[i+XX];
 +    yl = x[i+YY];
 +    yh = x[i+YY];
 +    zl = x[i+ZZ];
 +    zh = x[i+ZZ];
 +    i += stride;
 +    for(j=1; j<na; j++)
 +    {
 +        xl = min(xl,x[i+XX]);
 +        xh = max(xh,x[i+XX]);
 +        yl = min(yl,x[i+YY]);
 +        yh = max(yh,x[i+YY]);
 +        zl = min(zl,x[i+ZZ]);
 +        zh = max(zh,x[i+ZZ]);
 +        i += stride;
 +    }
 +    /* Note: possible double to float conversion here */
 +    bb[BBL_X] = R2F_D(xl);
 +    bb[BBL_Y] = R2F_D(yl);
 +    bb[BBL_Z] = R2F_D(zl);
 +    bb[BBU_X] = R2F_U(xh);
 +    bb[BBU_Y] = R2F_U(yh);
 +    bb[BBU_Z] = R2F_U(zh);
 +}
 +
 +/* Packed coordinates, bb order xyz0 */
 +static void calc_bounding_box_x_x4(int na,const real *x,float *bb)
 +{
 +    int  j;
 +    real xl,xh,yl,yh,zl,zh;
 +
 +    xl = x[XX*PACK_X4];
 +    xh = x[XX*PACK_X4];
 +    yl = x[YY*PACK_X4];
 +    yh = x[YY*PACK_X4];
 +    zl = x[ZZ*PACK_X4];
 +    zh = x[ZZ*PACK_X4];
 +    for(j=1; j<na; j++)
 +    {
 +        xl = min(xl,x[j+XX*PACK_X4]);
 +        xh = max(xh,x[j+XX*PACK_X4]);
 +        yl = min(yl,x[j+YY*PACK_X4]);
 +        yh = max(yh,x[j+YY*PACK_X4]);
 +        zl = min(zl,x[j+ZZ*PACK_X4]);
 +        zh = max(zh,x[j+ZZ*PACK_X4]);
 +    }
 +    /* Note: possible double to float conversion here */
 +    bb[BBL_X] = R2F_D(xl);
 +    bb[BBL_Y] = R2F_D(yl);
 +    bb[BBL_Z] = R2F_D(zl);
 +    bb[BBU_X] = R2F_U(xh);
 +    bb[BBU_Y] = R2F_U(yh);
 +    bb[BBU_Z] = R2F_U(zh);
 +}
 +
 +/* Packed coordinates, bb order xyz0 */
 +static void calc_bounding_box_x_x8(int na,const real *x,float *bb)
 +{
 +    int  j;
 +    real xl,xh,yl,yh,zl,zh;
 +
 +    xl = x[XX*PACK_X8];
 +    xh = x[XX*PACK_X8];
 +    yl = x[YY*PACK_X8];
 +    yh = x[YY*PACK_X8];
 +    zl = x[ZZ*PACK_X8];
 +    zh = x[ZZ*PACK_X8];
 +    for(j=1; j<na; j++)
 +    {
 +        xl = min(xl,x[j+XX*PACK_X8]);
 +        xh = max(xh,x[j+XX*PACK_X8]);
 +        yl = min(yl,x[j+YY*PACK_X8]);
 +        yh = max(yh,x[j+YY*PACK_X8]);
 +        zl = min(zl,x[j+ZZ*PACK_X8]);
 +        zh = max(zh,x[j+ZZ*PACK_X8]);
 +    }
 +    /* Note: possible double to float conversion here */
 +    bb[BBL_X] = R2F_D(xl);
 +    bb[BBL_Y] = R2F_D(yl);
 +    bb[BBL_Z] = R2F_D(zl);
 +    bb[BBU_X] = R2F_U(xh);
 +    bb[BBU_Y] = R2F_U(yh);
 +    bb[BBU_Z] = R2F_U(zh);
 +}
 +
-     bb[0*STRIDE_8BB] = R2F_D(xl);
-     bb[1*STRIDE_8BB] = R2F_D(yl);
-     bb[2*STRIDE_8BB] = R2F_D(zl);
-     bb[3*STRIDE_8BB] = R2F_U(xh);
-     bb[4*STRIDE_8BB] = R2F_U(yh);
-     bb[5*STRIDE_8BB] = R2F_U(zh);
++#ifdef NBNXN_SEARCH_BB_SSE
 +
 +/* Packed coordinates, bb order xyz0 */
 +static void calc_bounding_box_x_x4_halves(int na,const real *x,
 +                                          float *bb,float *bbj)
 +{
 +    calc_bounding_box_x_x4(min(na,2),x,bbj);
 +
 +    if (na > 2)
 +    {
 +        calc_bounding_box_x_x4(min(na-2,2),x+(PACK_X4>>1),bbj+NNBSBB_B);
 +    }
 +    else
 +    {
 +        /* Set the "empty" bounding box to the same as the first one,
 +         * so we don't need to treat special cases in the rest of the code.
 +         */
 +        _mm_store_ps(bbj+NNBSBB_B         ,_mm_load_ps(bbj));
 +        _mm_store_ps(bbj+NNBSBB_B+NNBSBB_C,_mm_load_ps(bbj+NNBSBB_C));
 +    }
 +
 +    _mm_store_ps(bb         ,_mm_min_ps(_mm_load_ps(bbj),
 +                                        _mm_load_ps(bbj+NNBSBB_B)));
 +    _mm_store_ps(bb+NNBSBB_C,_mm_max_ps(_mm_load_ps(bbj+NNBSBB_C),
 +                                        _mm_load_ps(bbj+NNBSBB_B+NNBSBB_C)));
 +}
 +
 +/* Coordinate order xyz, bb order xxxxyyyyzzzz */
 +static void calc_bounding_box_xxxx(int na,int stride,const real *x,float *bb)
 +{
 +    int  i,j;
 +    real xl,xh,yl,yh,zl,zh;
 +
 +    i = 0;
 +    xl = x[i+XX];
 +    xh = x[i+XX];
 +    yl = x[i+YY];
 +    yh = x[i+YY];
 +    zl = x[i+ZZ];
 +    zh = x[i+ZZ];
 +    i += stride;
 +    for(j=1; j<na; j++)
 +    {
 +        xl = min(xl,x[i+XX]);
 +        xh = max(xh,x[i+XX]);
 +        yl = min(yl,x[i+YY]);
 +        yh = max(yh,x[i+YY]);
 +        zl = min(zl,x[i+ZZ]);
 +        zh = max(zh,x[i+ZZ]);
 +        i += stride;
 +    }
 +    /* Note: possible double to float conversion here */
- #endif /* NBNXN_SEARCH_SSE */
++    bb[0*STRIDE_PBB] = R2F_D(xl);
++    bb[1*STRIDE_PBB] = R2F_D(yl);
++    bb[2*STRIDE_PBB] = R2F_D(zl);
++    bb[3*STRIDE_PBB] = R2F_U(xh);
++    bb[4*STRIDE_PBB] = R2F_U(yh);
++    bb[5*STRIDE_PBB] = R2F_U(zh);
 +}
 +
-     bb[0*STRIDE_8BB] = bb_work[BBL_X];
-     bb[1*STRIDE_8BB] = bb_work[BBL_Y];
-     bb[2*STRIDE_8BB] = bb_work[BBL_Z];
-     bb[3*STRIDE_8BB] = bb_work[BBU_X];
-     bb[4*STRIDE_8BB] = bb_work[BBU_Y];
-     bb[5*STRIDE_8BB] = bb_work[BBU_Z];
++#endif /* NBNXN_SEARCH_BB_SSE */
 +
 +#ifdef NBNXN_SEARCH_SSE_SINGLE
 +
 +/* Coordinate order xyz?, bb order xyz0 */
 +static void calc_bounding_box_sse(int na,const float *x,float *bb)
 +{
 +    __m128 bb_0_SSE,bb_1_SSE;
 +    __m128 x_SSE;
 +
 +    int  i;
 +
 +    bb_0_SSE = _mm_load_ps(x);
 +    bb_1_SSE = bb_0_SSE;
 +
 +    for(i=1; i<na; i++)
 +    {
 +        x_SSE    = _mm_load_ps(x+i*NNBSBB_C);
 +        bb_0_SSE = _mm_min_ps(bb_0_SSE,x_SSE);
 +        bb_1_SSE = _mm_max_ps(bb_1_SSE,x_SSE);
 +    }
 +
 +    _mm_store_ps(bb  ,bb_0_SSE);
 +    _mm_store_ps(bb+4,bb_1_SSE);
 +}
 +
 +/* Coordinate order xyz?, bb order xxxxyyyyzzzz */
 +static void calc_bounding_box_xxxx_sse(int na,const float *x,
 +                                       float *bb_work,
 +                                       real *bb)
 +{
 +    calc_bounding_box_sse(na,x,bb_work);
 +
- #ifdef NBNXN_SEARCH_SSE
++    bb[0*STRIDE_PBB] = bb_work[BBL_X];
++    bb[1*STRIDE_PBB] = bb_work[BBL_Y];
++    bb[2*STRIDE_PBB] = bb_work[BBL_Z];
++    bb[3*STRIDE_PBB] = bb_work[BBU_X];
++    bb[4*STRIDE_PBB] = bb_work[BBU_Y];
++    bb[5*STRIDE_PBB] = bb_work[BBU_Z];
 +}
 +
 +#endif /* NBNXN_SEARCH_SSE_SINGLE */
 +
-         for(s=0; s<grid->nsubc[c]; s+=STRIDE_8BB)
++#ifdef NBNXN_SEARCH_BB_SSE
 +
 +/* Combines pairs of consecutive bounding boxes */
 +static void combine_bounding_box_pairs(nbnxn_grid_t *grid,const float *bb)
 +{
 +    int    i,j,sc2,nc2,c2;
 +    __m128 min_SSE,max_SSE;
 +
 +    for(i=0; i<grid->ncx*grid->ncy; i++)
 +    {
 +        /* Starting bb in a column is expected to be 2-aligned */
 +        sc2 = grid->cxy_ind[i]>>1;
 +        /* For odd numbers skip the last bb here */
 +        nc2 = (grid->cxy_na[i]+3)>>(2+1);
 +        for(c2=sc2; c2<sc2+nc2; c2++)
 +        {
 +            min_SSE = _mm_min_ps(_mm_load_ps(bb+(c2*4+0)*NNBSBB_C),
 +                                 _mm_load_ps(bb+(c2*4+2)*NNBSBB_C));
 +            max_SSE = _mm_max_ps(_mm_load_ps(bb+(c2*4+1)*NNBSBB_C),
 +                                 _mm_load_ps(bb+(c2*4+3)*NNBSBB_C));
 +            _mm_store_ps(grid->bbj+(c2*2+0)*NNBSBB_C,min_SSE);
 +            _mm_store_ps(grid->bbj+(c2*2+1)*NNBSBB_C,max_SSE);
 +        }
 +        if (((grid->cxy_na[i]+3)>>2) & 1)
 +        {
 +            /* Copy the last bb for odd bb count in this column */
 +            for(j=0; j<NNBSBB_C; j++)
 +            {
 +                grid->bbj[(c2*2+0)*NNBSBB_C+j] = bb[(c2*4+0)*NNBSBB_C+j];
 +                grid->bbj[(c2*2+1)*NNBSBB_C+j] = bb[(c2*4+1)*NNBSBB_C+j];
 +            }
 +        }
 +    }
 +}
 +
 +#endif
 +
 +
 +/* Prints the average bb size, used for debug output */
 +static void print_bbsizes_simple(FILE *fp,
 +                                 const nbnxn_search_t nbs,
 +                                 const nbnxn_grid_t *grid)
 +{
 +    int  c,d;
 +    dvec ba;
 +
 +    clear_dvec(ba);
 +    for(c=0; c<grid->nc; c++)
 +    {
 +        for(d=0; d<DIM; d++)
 +        {
 +            ba[d] += grid->bb[c*NNBSBB_B+NNBSBB_C+d] - grid->bb[c*NNBSBB_B+d];
 +        }
 +    }
 +    dsvmul(1.0/grid->nc,ba,ba);
 +
 +    fprintf(fp,"ns bb: %4.2f %4.2f %4.2f  %4.2f %4.2f %4.2f rel %4.2f %4.2f %4.2f\n",
 +            nbs->box[XX][XX]/grid->ncx,
 +            nbs->box[YY][YY]/grid->ncy,
 +            nbs->box[ZZ][ZZ]*grid->ncx*grid->ncy/grid->nc,
 +            ba[XX],ba[YY],ba[ZZ],
 +            ba[XX]*grid->ncx/nbs->box[XX][XX],
 +            ba[YY]*grid->ncy/nbs->box[YY][YY],
 +            ba[ZZ]*grid->nc/(grid->ncx*grid->ncy*nbs->box[ZZ][ZZ]));
 +}
 +
 +/* Prints the average bb size, used for debug output */
 +static void print_bbsizes_supersub(FILE *fp,
 +                                   const nbnxn_search_t nbs,
 +                                   const nbnxn_grid_t *grid)
 +{
 +    int  ns,c,s;
 +    dvec ba;
 +
 +    clear_dvec(ba);
 +    ns = 0;
 +    for(c=0; c<grid->nc; c++)
 +    {
 +#ifdef NBNXN_BBXXXX
-             cs_w = (c*GPU_NSUBCELL + s)/STRIDE_8BB;
-             for(i=0; i<STRIDE_8BB; i++)
++        for(s=0; s<grid->nsubc[c]; s+=STRIDE_PBB)
 +        {
 +            int cs_w,i,d;
 +
-                         grid->bb[cs_w*NNBSBB_XXXX+(DIM+d)*STRIDE_8BB+i] -
-                         grid->bb[cs_w*NNBSBB_XXXX+     d *STRIDE_8BB+i];
++            cs_w = (c*GPU_NSUBCELL + s)/STRIDE_PBB;
++            for(i=0; i<STRIDE_PBB; i++)
 +            {
 +                for(d=0; d<DIM; d++)
 +                {
 +                    ba[d] +=
- #if defined GMX_DOUBLE && defined NBNXN_SEARCH_SSE
++                        grid->bb[cs_w*NNBSBB_XXXX+(DIM+d)*STRIDE_PBB+i] -
++                        grid->bb[cs_w*NNBSBB_XXXX+     d *STRIDE_PBB+i];
 +                }
 +            }
 +        }
 +#else
 +        for(s=0; s<grid->nsubc[c]; s++)
 +        {
 +            int cs,d;
 +
 +            cs = c*GPU_NSUBCELL + s;
 +            for(d=0; d<DIM; d++)
 +            {
 +                ba[d] +=
 +                    grid->bb[cs*NNBSBB_B+NNBSBB_C+d] -
 +                    grid->bb[cs*NNBSBB_B         +d];
 +            }
 +        }
 +#endif
 +        ns += grid->nsubc[c];
 +    }
 +    dsvmul(1.0/ns,ba,ba);
 +
 +    fprintf(fp,"ns bb: %4.2f %4.2f %4.2f  %4.2f %4.2f %4.2f rel %4.2f %4.2f %4.2f\n",
 +            nbs->box[XX][XX]/(grid->ncx*GPU_NSUBCELL_X),
 +            nbs->box[YY][YY]/(grid->ncy*GPU_NSUBCELL_Y),
 +            nbs->box[ZZ][ZZ]*grid->ncx*grid->ncy/(grid->nc*GPU_NSUBCELL_Z),
 +            ba[XX],ba[YY],ba[ZZ],
 +            ba[XX]*grid->ncx*GPU_NSUBCELL_X/nbs->box[XX][XX],
 +            ba[YY]*grid->ncy*GPU_NSUBCELL_Y/nbs->box[YY][YY],
 +            ba[ZZ]*grid->nc*GPU_NSUBCELL_Z/(grid->ncx*grid->ncy*nbs->box[ZZ][ZZ]));
 +}
 +
 +/* Potentially sorts atoms on LJ coefficients !=0 and ==0.
 + * Also sets interaction flags.
 + */
 +void sort_on_lj(nbnxn_atomdata_t *nbat,int na_c,
 +                int a0,int a1,const int *atinfo,
 +                int *order,
 +                int *flags)
 +{
 +    int subc,s,a,n1,n2,a_lj_max,i,j;
 +    int sort1[NBNXN_NA_SC_MAX/GPU_NSUBCELL];
 +    int sort2[NBNXN_NA_SC_MAX/GPU_NSUBCELL];
 +    gmx_bool haveQ;
 +
 +    *flags = 0;
 +
 +    subc = 0;
 +    for(s=a0; s<a1; s+=na_c)
 +    {
 +        /* Make lists for this (sub-)cell on atoms with and without LJ */
 +        n1 = 0;
 +        n2 = 0;
 +        haveQ = FALSE;
 +        a_lj_max = -1;
 +        for(a=s; a<min(s+na_c,a1); a++)
 +        {
 +            haveQ = haveQ || GET_CGINFO_HAS_Q(atinfo[order[a]]);
 +
 +            if (GET_CGINFO_HAS_VDW(atinfo[order[a]]))
 +            {
 +                sort1[n1++] = order[a];
 +                a_lj_max = a;
 +            }
 +            else
 +            {
 +                sort2[n2++] = order[a];
 +            }
 +        }
 +
 +        /* If we don't have atom with LJ, there's nothing to sort */
 +        if (n1 > 0)
 +        {
 +            *flags |= NBNXN_CI_DO_LJ(subc);
 +
 +            if (2*n1 <= na_c)
 +            {
 +                /* Only sort when strictly necessary. Ordering particles
 +                 * Ordering particles can lead to less accurate summation
 +                 * due to rounding, both for LJ and Coulomb interactions.
 +                 */
 +                if (2*(a_lj_max - s) >= na_c)
 +                {
 +                    for(i=0; i<n1; i++)
 +                    {
 +                        order[a0+i] = sort1[i];
 +                    }
 +                    for(j=0; j<n2; j++)
 +                    {
 +                        order[a0+n1+j] = sort2[j];
 +                    }
 +                }
 +
 +                *flags |= NBNXN_CI_HALF_LJ(subc);
 +            }
 +        }
 +        if (haveQ)
 +        {
 +            *flags |= NBNXN_CI_DO_COUL(subc);
 +        }
 +        subc++;
 +    }
 +}
 +
 +/* Fill a pair search cell with atoms.
 + * Potentially sorts atoms and sets the interaction flags.
 + */
 +void fill_cell(const nbnxn_search_t nbs,
 +               nbnxn_grid_t *grid,
 +               nbnxn_atomdata_t *nbat,
 +               int a0,int a1,
 +               const int *atinfo,
 +               rvec *x,
 +               int sx,int sy, int sz,
 +               float *bb_work)
 +{
 +    int    na,a;
 +    size_t offset;
 +    float  *bb_ptr;
 +
 +    na = a1 - a0;
 +
 +    if (grid->bSimple)
 +    {
 +        sort_on_lj(nbat,grid->na_c,a0,a1,atinfo,nbs->a,
 +                   grid->flags+(a0>>grid->na_c_2log)-grid->cell0);
 +    }
 +
 +    /* Now we have sorted the atoms, set the cell indices */
 +    for(a=a0; a<a1; a++)
 +    {
 +        nbs->cell[nbs->a[a]] = a;
 +    }
 +
 +    copy_rvec_to_nbat_real(nbs->a+a0,a1-a0,grid->na_c,x,
 +                           nbat->XFormat,nbat->x,a0,
 +                           sx,sy,sz);
 +
 +    if (nbat->XFormat == nbatX4)
 +    {
 +        /* Store the bounding boxes as xyz.xyz. */
 +        offset = ((a0 - grid->cell0*grid->na_sc)>>grid->na_c_2log)*NNBSBB_B;
 +        bb_ptr = grid->bb + offset;
 +
-             ((a0-grid->cell0*grid->na_sc)>>(grid->na_c_2log+STRIDE_8BB_2LOG))*NNBSBB_XXXX +
-             (((a0-grid->cell0*grid->na_sc)>>grid->na_c_2log) & (STRIDE_8BB-1));
++#if defined GMX_DOUBLE && defined NBNXN_SEARCH_BB_SSE
 +        if (2*grid->na_cj == grid->na_c)
 +        {
 +            calc_bounding_box_x_x4_halves(na,nbat->x+X4_IND_A(a0),bb_ptr,
 +                                          grid->bbj+offset*2);
 +        }
 +        else
 +#endif
 +        {
 +            calc_bounding_box_x_x4(na,nbat->x+X4_IND_A(a0),bb_ptr);
 +        }
 +    }
 +    else if (nbat->XFormat == nbatX8)
 +    {
 +        /* Store the bounding boxes as xyz.xyz. */
 +        offset = ((a0 - grid->cell0*grid->na_sc)>>grid->na_c_2log)*NNBSBB_B;
 +        bb_ptr = grid->bb + offset;
 +
 +        calc_bounding_box_x_x8(na,nbat->x+X8_IND_A(a0),bb_ptr);
 +    }
 +#ifdef NBNXN_BBXXXX
 +    else if (!grid->bSimple)
 +    {
 +        /* Store the bounding boxes in a format convenient
 +         * for SSE calculations: xxxxyyyyzzzz...
 +                             */
 +        bb_ptr =
 +            grid->bb +
-                     bb_ptr[0*STRIDE_8BB],bb_ptr[3*STRIDE_8BB],
-                     bb_ptr[1*STRIDE_8BB],bb_ptr[4*STRIDE_8BB],
-                     bb_ptr[2*STRIDE_8BB],bb_ptr[5*STRIDE_8BB]);
++            ((a0-grid->cell0*grid->na_sc)>>(grid->na_c_2log+STRIDE_PBB_2LOG))*NNBSBB_XXXX +
++            (((a0-grid->cell0*grid->na_sc)>>grid->na_c_2log) & (STRIDE_PBB-1));
 +
 +#ifdef NBNXN_SEARCH_SSE_SINGLE
 +        if (nbat->XFormat == nbatXYZQ)
 +        {
 +            calc_bounding_box_xxxx_sse(na,nbat->x+a0*nbat->xstride,
 +                                       bb_work,bb_ptr);
 +        }
 +        else
 +#endif
 +        {
 +            calc_bounding_box_xxxx(na,nbat->xstride,nbat->x+a0*nbat->xstride,
 +                                   bb_ptr);
 +        }
 +        if (gmx_debug_at)
 +        {
 +            fprintf(debug,"%2d %2d %2d bb %5.2f %5.2f %5.2f %5.2f %5.2f %5.2f\n",
 +                    sx,sy,sz,
-                                 rvec *x,const int *move,
++                    bb_ptr[0*STRIDE_PBB],bb_ptr[3*STRIDE_PBB],
++                    bb_ptr[1*STRIDE_PBB],bb_ptr[4*STRIDE_PBB],
++                    bb_ptr[2*STRIDE_PBB],bb_ptr[5*STRIDE_PBB]);
 +        }
 +    }
 +#endif
 +    else
 +    {
 +        /* Store the bounding boxes as xyz.xyz. */
 +        bb_ptr = grid->bb+((a0-grid->cell0*grid->na_sc)>>grid->na_c_2log)*NNBSBB_B;
 +
 +        calc_bounding_box(na,nbat->xstride,nbat->x+a0*nbat->xstride,
 +                          bb_ptr);
 +
 +        if (gmx_debug_at)
 +        {
 +            int bbo;
 +            bbo = (a0 - grid->cell0*grid->na_sc)/grid->na_c;
 +            fprintf(debug,"%2d %2d %2d bb %5.2f %5.2f %5.2f %5.2f %5.2f %5.2f\n",
 +                    sx,sy,sz,
 +                    (grid->bb+bbo*NNBSBB_B)[BBL_X],
 +                    (grid->bb+bbo*NNBSBB_B)[BBU_X],
 +                    (grid->bb+bbo*NNBSBB_B)[BBL_Y],
 +                    (grid->bb+bbo*NNBSBB_B)[BBU_Y],
 +                    (grid->bb+bbo*NNBSBB_B)[BBL_Z],
 +                    (grid->bb+bbo*NNBSBB_B)[BBU_Z]);
 +        }
 +    }
 +}
 +
 +/* Spatially sort the atoms within one grid column */
 +static void sort_columns_simple(const nbnxn_search_t nbs,
 +                                int dd_zone,
 +                                nbnxn_grid_t *grid,
 +                                int a0,int a1,
 +                                const int *atinfo,
 +                                rvec *x,
 +                                nbnxn_atomdata_t *nbat,
 +                                int cxy_start,int cxy_end,
 +                                int *sort_work)
 +{
 +    int  cxy;
 +    int  cx,cy,cz,ncz,cfilled,c;
 +    int  na,ash,ind,a;
 +    int  na_c,ash_c;
 +
 +    if (debug)
 +    {
 +        fprintf(debug,"cell0 %d sorting columns %d - %d, atoms %d - %d\n",
 +                grid->cell0,cxy_start,cxy_end,a0,a1);
 +    }
 +
 +    /* Sort the atoms within each x,y column in 3 dimensions */
 +    for(cxy=cxy_start; cxy<cxy_end; cxy++)
 +    {
 +        cx = cxy/grid->ncy;
 +        cy = cxy - cx*grid->ncy;
 +
 +        na  = grid->cxy_na[cxy];
 +        ncz = grid->cxy_ind[cxy+1] - grid->cxy_ind[cxy];
 +        ash = (grid->cell0 + grid->cxy_ind[cxy])*grid->na_sc;
 +
 +        /* Sort the atoms within each x,y column on z coordinate */
 +        sort_atoms(ZZ,FALSE,
 +                   nbs->a+ash,na,x,
 +                   grid->c0[ZZ],
 +                   ncz*grid->na_sc*SORT_GRID_OVERSIZE/nbs->box[ZZ][ZZ],
 +                   ncz*grid->na_sc*SGSF,sort_work);
 +
 +        /* Fill the ncz cells in this column */
 +        cfilled = grid->cxy_ind[cxy];
 +        for(cz=0; cz<ncz; cz++)
 +        {
 +            c  = grid->cxy_ind[cxy] + cz ;
 +
 +            ash_c = ash + cz*grid->na_sc;
 +            na_c  = min(grid->na_sc,na-(ash_c-ash));
 +
 +            fill_cell(nbs,grid,nbat,
 +                      ash_c,ash_c+na_c,atinfo,x,
 +                      grid->na_sc*cx + (dd_zone >> 2),
 +                      grid->na_sc*cy + (dd_zone & 3),
 +                      grid->na_sc*cz,
 +                      NULL);
 +
 +            /* This copy to bbcz is not really necessary.
 +             * But it allows to use the same grid search code
 +             * for the simple and supersub cell setups.
 +             */
 +            if (na_c > 0)
 +            {
 +                cfilled = c;
 +            }
 +            grid->bbcz[c*NNBSBB_D  ] = grid->bb[cfilled*NNBSBB_B+2];
 +            grid->bbcz[c*NNBSBB_D+1] = grid->bb[cfilled*NNBSBB_B+6];
 +        }
 +
 +        /* Set the unused atom indices to -1 */
 +        for(ind=na; ind<ncz*grid->na_sc; ind++)
 +        {
 +            nbs->a[ash+ind] = -1;
 +        }
 +    }
 +}
 +
 +/* Spatially sort the atoms within one grid column */
 +static void sort_columns_supersub(const nbnxn_search_t nbs,
 +                                  int dd_zone,
 +                                  nbnxn_grid_t *grid,
 +                                  int a0,int a1,
 +                                  const int *atinfo,
 +                                  rvec *x,
 +                                  nbnxn_atomdata_t *nbat,
 +                                  int cxy_start,int cxy_end,
 +                                  int *sort_work)
 +{
 +    int  cxy;
 +    int  cx,cy,cz=-1,c=-1,ncz;
 +    int  na,ash,na_c,ind,a;
 +    int  subdiv_z,sub_z,na_z,ash_z;
 +    int  subdiv_y,sub_y,na_y,ash_y;
 +    int  subdiv_x,sub_x,na_x,ash_x;
 +
 +    /* cppcheck-suppress unassignedVariable */
 +    float bb_work_array[NNBSBB_B+3],*bb_work_align;
 +
 +    bb_work_align = (float *)(((size_t)(bb_work_array+3)) & (~((size_t)15)));
 +
 +    if (debug)
 +    {
 +        fprintf(debug,"cell0 %d sorting columns %d - %d, atoms %d - %d\n",
 +                grid->cell0,cxy_start,cxy_end,a0,a1);
 +    }
 +
 +    subdiv_x = grid->na_c;
 +    subdiv_y = GPU_NSUBCELL_X*subdiv_x;
 +    subdiv_z = GPU_NSUBCELL_Y*subdiv_y;
 +
 +    /* Sort the atoms within each x,y column in 3 dimensions */
 +    for(cxy=cxy_start; cxy<cxy_end; cxy++)
 +    {
 +        cx = cxy/grid->ncy;
 +        cy = cxy - cx*grid->ncy;
 +
 +        na  = grid->cxy_na[cxy];
 +        ncz = grid->cxy_ind[cxy+1] - grid->cxy_ind[cxy];
 +        ash = (grid->cell0 + grid->cxy_ind[cxy])*grid->na_sc;
 +
 +        /* Sort the atoms within each x,y column on z coordinate */
 +        sort_atoms(ZZ,FALSE,
 +                   nbs->a+ash,na,x,
 +                   grid->c0[ZZ],
 +                   ncz*grid->na_sc*SORT_GRID_OVERSIZE/nbs->box[ZZ][ZZ],
 +                   ncz*grid->na_sc*SGSF,sort_work);
 +
 +        /* This loop goes over the supercells and subcells along z at once */
 +        for(sub_z=0; sub_z<ncz*GPU_NSUBCELL_Z; sub_z++)
 +        {
 +            ash_z = ash + sub_z*subdiv_z;
 +            na_z  = min(subdiv_z,na-(ash_z-ash));
 +
 +            /* We have already sorted on z */
 +
 +            if (sub_z % GPU_NSUBCELL_Z == 0)
 +            {
 +                cz = sub_z/GPU_NSUBCELL_Z;
 +                c  = grid->cxy_ind[cxy] + cz ;
 +
 +                /* The number of atoms in this supercell */
 +                na_c = min(grid->na_sc,na-(ash_z-ash));
 +
 +                grid->nsubc[c] = min(GPU_NSUBCELL,(na_c+grid->na_c-1)/grid->na_c);
 +
 +                /* Store the z-boundaries of the super cell */
 +                grid->bbcz[c*NNBSBB_D  ] = x[nbs->a[ash_z]][ZZ];
 +                grid->bbcz[c*NNBSBB_D+1] = x[nbs->a[ash_z+na_c-1]][ZZ];
 +            }
 +
 +#if GPU_NSUBCELL_Y > 1
 +            /* Sort the atoms along y */
 +            sort_atoms(YY,(sub_z & 1),
 +                       nbs->a+ash_z,na_z,x,
 +                       grid->c0[YY]+cy*grid->sy,grid->inv_sy,
 +                       subdiv_y*SGSF,sort_work);
 +#endif
 +
 +            for(sub_y=0; sub_y<GPU_NSUBCELL_Y; sub_y++)
 +            {
 +                ash_y = ash_z + sub_y*subdiv_y;
 +                na_y  = min(subdiv_y,na-(ash_y-ash));
 +
 +#if GPU_NSUBCELL_X > 1
 +                /* Sort the atoms along x */
 +                sort_atoms(XX,((cz*GPU_NSUBCELL_Y + sub_y) & 1),
 +                           nbs->a+ash_y,na_y,x,
 +                           grid->c0[XX]+cx*grid->sx,grid->inv_sx,
 +                           subdiv_x*SGSF,sort_work);
 +#endif
 +
 +                for(sub_x=0; sub_x<GPU_NSUBCELL_X; sub_x++)
 +                {
 +                    ash_x = ash_y + sub_x*subdiv_x;
 +                    na_x  = min(subdiv_x,na-(ash_x-ash));
 +
 +                    fill_cell(nbs,grid,nbat,
 +                              ash_x,ash_x+na_x,atinfo,x,
 +                              grid->na_c*(cx*GPU_NSUBCELL_X+sub_x) + (dd_zone >> 2),
 +                              grid->na_c*(cy*GPU_NSUBCELL_Y+sub_y) + (dd_zone & 3),
 +                              grid->na_c*sub_z,
 +                              bb_work_align);
 +                }
 +            }
 +        }
 +
 +        /* Set the unused atom indices to -1 */
 +        for(ind=na; ind<ncz*grid->na_sc; ind++)
 +        {
 +            nbs->a[ash+ind] = -1;
 +        }
 +    }
 +}
 +
 +/* Determine in which grid column atoms should go */
 +static void calc_column_indices(nbnxn_grid_t *grid,
 +                                int a0,int a1,
-     for(i=n0; i<n1; i++)
++                                rvec *x,
++                                int dd_zone,const int *move,
 +                                int thread,int nthread,
 +                                int *cell,
 +                                int *cxy_na)
 +{
 +    int  n0,n1,i;
 +    int  cx,cy;
 +
 +    /* We add one extra cell for particles which moved during DD */
 +    for(i=0; i<grid->ncx*grid->ncy+1; i++)
 +    {
 +        cxy_na[i] = 0;
 +    }
 +
 +    n0 = a0 + (int)((thread+0)*(a1 - a0))/nthread;
 +    n1 = a0 + (int)((thread+1)*(a1 - a0))/nthread;
-         if (move == NULL || move[i] >= 0)
++    if (dd_zone == 0)
 +    {
-             /* We need to be careful with rounding,
-              * particles might be a few bits outside the local box.
-              * The int cast takes care of the lower bound,
-              * we need to explicitly take care of the upper bound.
-              */
-             cx = (int)((x[i][XX] - grid->c0[XX])*grid->inv_sx);
-             if (cx == grid->ncx)
-             {
-                 cx = grid->ncx - 1;
-             }
-             cy = (int)((x[i][YY] - grid->c0[YY])*grid->inv_sy);
-             if (cy == grid->ncy)
++        /* Home zone */
++        for(i=n0; i<n1; i++)
 +        {
-                 cy = grid->ncy - 1;
-             }
-             /* For the moment cell contains only the, grid local,
-              * x and y indices, not z.
-              */
-             cell[i] = cx*grid->ncy + cy;
++            if (move == NULL || move[i] >= 0)
 +            {
-             if (cell[i] < 0 || cell[i] >= grid->ncx*grid->ncy)
++                /* We need to be careful with rounding,
++                 * particles might be a few bits outside the local zone.
++                 * The int cast takes care of the lower bound,
++                 * we will explicitly take care of the upper bound.
++                 */
++                cx = (int)((x[i][XX] - grid->c0[XX])*grid->inv_sx);
++                cy = (int)((x[i][YY] - grid->c0[YY])*grid->inv_sy);
 +
 +#ifdef DEBUG_NBNXN_GRIDDING
-                 gmx_fatal(FARGS,
-                           "grid cell cx %d cy %d out of range (max %d %d)\n"
-                           "atom %f %f %f, grid->c0 %f %f",
-                           cx,cy,grid->ncx,grid->ncy,
-                           x[i][XX],x[i][YY],x[i][ZZ],grid->c0[XX],grid->c0[YY]);
++                if (cx < 0 || cx >= grid->ncx ||
++                    cy < 0 || cy >= grid->ncy)
++                {
++                    gmx_fatal(FARGS,
++                              "grid cell cx %d cy %d out of range (max %d %d)\n"
++                              "atom %f %f %f, grid->c0 %f %f",
++                              cx,cy,grid->ncx,grid->ncy,
++                              x[i][XX],x[i][YY],x[i][ZZ],grid->c0[XX],grid->c0[YY]);
++                }
++#endif
++                /* Take care of potential rouding issues */
++                cx = min(cx,grid->ncx - 1);
++                cy = min(cy,grid->ncy - 1);
++
++                /* For the moment cell will contain only the, grid local,
++                 * x and y indices, not z.
++                 */
++                cell[i] = cx*grid->ncy + cy;
++            }
++            else
 +            {
- #endif
++                /* Put this moved particle after the end of the grid,
++                 * so we can process it later without using conditionals.
++                 */
++                cell[i] = grid->ncx*grid->ncy;
 +            }
-         else
++
++            cxy_na[cell[i]]++;
 +        }
-             /* Put this moved particle after the end of the grid,
-              * so we can process it later without using conditionals.
++    }
++    else
++    {
++        /* Non-home zone */
++        for(i=n0; i<n1; i++)
 +        {
-             cell[i] = grid->ncx*grid->ncy;
-         }
++            cx = (int)((x[i][XX] - grid->c0[XX])*grid->inv_sx);
++            cy = (int)((x[i][YY] - grid->c0[YY])*grid->inv_sy);
++
++            /* For non-home zones there could be particles outside
++             * the non-bonded cut-off range, which have been communicated
++             * for bonded interactions only. For the result it doesn't
++             * matter where these end up on the grid. For performance
++             * we put them in an extra row at the border.
 +             */
-         cxy_na[cell[i]]++;
++            cx = max(cx,0);
++            cx = min(cx,grid->ncx - 1);
++            cy = max(cy,0);
++            cy = min(cy,grid->ncy - 1);
 +
-         calc_column_indices(grid,a0,a1,x,move,thread,nthread,
++            /* For the moment cell will contain only the, grid local,
++             * x and y indices, not z.
++             */
++            cell[i] = cx*grid->ncy + cy;
++
++            cxy_na[cell[i]]++;
++        }
 +    }
 +}
 +
 +/* Determine in which grid cells the atoms should go */
 +static void calc_cell_indices(const nbnxn_search_t nbs,
 +                              int dd_zone,
 +                              nbnxn_grid_t *grid,
 +                              int a0,int a1,
 +                              const int *atinfo,
 +                              rvec *x,
 +                              const int *move,
 +                              nbnxn_atomdata_t *nbat)
 +{
 +    int  n0,n1,i;
 +    int  cx,cy,cxy,ncz_max,ncz;
 +    int  nthread,thread;
 +    int  *cxy_na,cxy_na_i;
 +
 +    nthread = gmx_omp_nthreads_get(emntPairsearch);
 +
 +#pragma omp parallel for num_threads(nthread) schedule(static)
 +    for(thread=0; thread<nthread; thread++)
 +    {
-     /* Set the cell indices for the moved particles */
-     n0 = grid->nc*grid->na_sc;
-     n1 = grid->nc*grid->na_sc+grid->cxy_na[grid->ncx*grid->ncy];
-     for(i=n0; i<n1; i++)
++        calc_column_indices(grid,a0,a1,x,dd_zone,move,thread,nthread,
 +                            nbs->cell,nbs->work[thread].cxy_na);
 +    }
 +
 +    /* Make the cell index as a function of x and y */
 +    ncz_max = 0;
 +    ncz = 0;
 +    grid->cxy_ind[0] = 0;
 +    for(i=0; i<grid->ncx*grid->ncy+1; i++)
 +    {
 +        /* We set ncz_max at the beginning of the loop iso at the end
 +         * to skip i=grid->ncx*grid->ncy which are moved particles
 +         * that do not need to be ordered on the grid.
 +         */
 +        if (ncz > ncz_max)
 +        {
 +            ncz_max = ncz;
 +        }
 +        cxy_na_i = nbs->work[0].cxy_na[i];
 +        for(thread=1; thread<nthread; thread++)
 +        {
 +            cxy_na_i += nbs->work[thread].cxy_na[i];
 +        }
 +        ncz = (cxy_na_i + grid->na_sc - 1)/grid->na_sc;
 +        if (nbat->XFormat == nbatX8)
 +        {
 +            /* Make the number of cell a multiple of 2 */
 +            ncz = (ncz + 1) & ~1;
 +        }
 +        grid->cxy_ind[i+1] = grid->cxy_ind[i] + ncz;
 +        /* Clear cxy_na, so we can reuse the array below */
 +        grid->cxy_na[i] = 0;
 +    }
 +    grid->nc = grid->cxy_ind[grid->ncx*grid->ncy] - grid->cxy_ind[0];
 +
 +    nbat->natoms = (grid->cell0 + grid->nc)*grid->na_sc;
 +
 +    if (debug)
 +    {
 +        fprintf(debug,"ns na_sc %d na_c %d super-cells: %d x %d y %d z %.1f maxz %d\n",
 +                grid->na_sc,grid->na_c,grid->nc,
 +                grid->ncx,grid->ncy,grid->nc/((double)(grid->ncx*grid->ncy)),
 +                ncz_max);
 +        if (gmx_debug_at)
 +        {
 +            i = 0;
 +            for(cy=0; cy<grid->ncy; cy++)
 +            {
 +                for(cx=0; cx<grid->ncx; cx++)
 +                {
 +                    fprintf(debug," %2d",grid->cxy_ind[i+1]-grid->cxy_ind[i]);
 +                    i++;
 +                }
 +                fprintf(debug,"\n");
 +            }
 +        }
 +    }
 +
 +    /* Make sure the work array for sorting is large enough */
 +    if (ncz_max*grid->na_sc*SGSF > nbs->work[0].sort_work_nalloc)
 +    {
 +        for(thread=0; thread<nbs->nthread_max; thread++)
 +        {
 +            nbs->work[thread].sort_work_nalloc =
 +                over_alloc_large(ncz_max*grid->na_sc*SGSF);
 +            srenew(nbs->work[thread].sort_work,
 +                   nbs->work[thread].sort_work_nalloc);
++            /* When not in use, all elements should be -1 */
++            for(i=0; i<nbs->work[thread].sort_work_nalloc; i++)
++            {
++                nbs->work[thread].sort_work[i] = -1;
++            }
 +        }
 +    }
 +
 +    /* Now we know the dimensions we can fill the grid.
 +     * This is the first, unsorted fill. We sort the columns after this.
 +     */
 +    for(i=a0; i<a1; i++)
 +    {
 +        /* At this point nbs->cell contains the local grid x,y indices */
 +        cxy = nbs->cell[i];
 +        nbs->a[(grid->cell0 + grid->cxy_ind[cxy])*grid->na_sc + grid->cxy_na[cxy]++] = i;
 +    }
 +
-         nbs->cell[nbs->a[i]] = i;
++    if (dd_zone == 0)
 +    {
- #ifdef NBNXN_SEARCH_SSE
++        /* Set the cell indices for the moved particles */
++        n0 = grid->nc*grid->na_sc;
++        n1 = grid->nc*grid->na_sc+grid->cxy_na[grid->ncx*grid->ncy];
++        if (dd_zone == 0)
++        {
++            for(i=n0; i<n1; i++)
++            {
++                nbs->cell[nbs->a[i]] = i;
++            }
++        }
 +    }
 +
 +    /* Sort the super-cell columns along z into the sub-cells. */
 +#pragma omp parallel for num_threads(nbs->nthread_max) schedule(static)
 +    for(thread=0; thread<nbs->nthread_max; thread++)
 +    {
 +        if (grid->bSimple)
 +        {
 +            sort_columns_simple(nbs,dd_zone,grid,a0,a1,atinfo,x,nbat,
 +                                ((thread+0)*grid->ncx*grid->ncy)/nthread,
 +                                ((thread+1)*grid->ncx*grid->ncy)/nthread,
 +                                nbs->work[thread].sort_work);
 +        }
 +        else
 +        {
 +            sort_columns_supersub(nbs,dd_zone,grid,a0,a1,atinfo,x,nbat,
 +                                  ((thread+0)*grid->ncx*grid->ncy)/nthread,
 +                                  ((thread+1)*grid->ncx*grid->ncy)/nthread,
 +                                  nbs->work[thread].sort_work);
 +        }
 +    }
 +
-     nc_max_grid = set_grid_size_xy(nbs,grid,n-nmoved,corner0,corner1,
++#ifdef NBNXN_SEARCH_BB_SSE
 +    if (grid->bSimple && nbat->XFormat == nbatX8)
 +    {
 +        combine_bounding_box_pairs(grid,grid->bb);
 +    }
 +#endif
 +
 +    if (!grid->bSimple)
 +    {
 +        grid->nsubc_tot = 0;
 +        for(i=0; i<grid->nc; i++)
 +        {
 +            grid->nsubc_tot += grid->nsubc[i];
 +        }
 +    }
 +
 +    if (debug)
 +    {
 +        if (grid->bSimple)
 +        {
 +            print_bbsizes_simple(debug,nbs,grid);
 +        }
 +        else
 +        {
 +            fprintf(debug,"ns non-zero sub-cells: %d average atoms %.2f\n",
 +                    grid->nsubc_tot,(a1-a0)/(double)grid->nsubc_tot);
 +
 +            print_bbsizes_supersub(debug,nbs,grid);
 +        }
 +    }
 +}
 +
 +static void init_buffer_flags(nbnxn_buffer_flags_t *flags,
 +                              int natoms)
 +{
 +    int b;
 +
 +    flags->nflag = (natoms + NBNXN_BUFFERFLAG_SIZE - 1)/NBNXN_BUFFERFLAG_SIZE;
 +    if (flags->nflag > flags->flag_nalloc)
 +    {
 +        flags->flag_nalloc = over_alloc_large(flags->nflag);
 +        srenew(flags->flag,flags->flag_nalloc);
 +    }
 +    for(b=0; b<flags->nflag; b++)
 +    {
 +        flags->flag[b] = 0;
 +    }
 +}
 +
 +/* Sets up a grid and puts the atoms on the grid.
 + * This function only operates on one domain of the domain decompostion.
 + * Note that without domain decomposition there is only one domain.
 + */
 +void nbnxn_put_on_grid(nbnxn_search_t nbs,
 +                       int ePBC,matrix box,
 +                       int dd_zone,
 +                       rvec corner0,rvec corner1,
 +                       int a0,int a1,
 +                       real atom_density,
 +                       const int *atinfo,
 +                       rvec *x,
 +                       int nmoved,int *move,
 +                       int nb_kernel_type,
 +                       nbnxn_atomdata_t *nbat)
 +{
 +    nbnxn_grid_t *grid;
 +    int n;
 +    int nc_max_grid,nc_max;
 +
 +    grid = &nbs->grid[dd_zone];
 +
 +    nbs_cycle_start(&nbs->cc[enbsCCgrid]);
 +
 +    grid->bSimple = nbnxn_kernel_pairlist_simple(nb_kernel_type);
 +
 +    grid->na_c      = nbnxn_kernel_to_ci_size(nb_kernel_type);
 +    grid->na_cj     = nbnxn_kernel_to_cj_size(nb_kernel_type);
 +    grid->na_sc     = (grid->bSimple ? 1 : GPU_NSUBCELL)*grid->na_c;
 +    grid->na_c_2log = get_2log(grid->na_c);
 +
 +    nbat->na_c = grid->na_c;
 +
 +    if (dd_zone == 0)
 +    {
 +        grid->cell0 = 0;
 +    }
 +    else
 +    {
 +        grid->cell0 =
 +            (nbs->grid[dd_zone-1].cell0 + nbs->grid[dd_zone-1].nc)*
 +            nbs->grid[dd_zone-1].na_sc/grid->na_sc;
 +    }
 +
 +    n = a1 - a0;
 +
 +    if (dd_zone == 0)
 +    {
 +        nbs->ePBC = ePBC;
 +        copy_mat(box,nbs->box);
 +
 +        if (atom_density >= 0)
 +        {
 +            grid->atom_density = atom_density;
 +        }
 +        else
 +        {
 +            grid->atom_density = grid_atom_density(n-nmoved,corner0,corner1);
 +        }
 +
 +        grid->cell0 = 0;
 +
 +        nbs->natoms_local    = a1 - nmoved;
 +        /* We assume that nbnxn_put_on_grid is called first
 +         * for the local atoms (dd_zone=0).
 +         */
 +        nbs->natoms_nonlocal = a1 - nmoved;
 +    }
 +    else
 +    {
 +        nbs->natoms_nonlocal = max(nbs->natoms_nonlocal,a1);
 +    }
 +
- #ifdef NBNXN_SEARCH_SSE
++    nc_max_grid = set_grid_size_xy(nbs,grid,
++                                   dd_zone,n-nmoved,corner0,corner1,
 +                                   nbs->grid[0].atom_density,
 +                                   nbat->XFormat);
 +
 +    nc_max = grid->cell0 + nc_max_grid;
 +
 +    if (a1 > nbs->cell_nalloc)
 +    {
 +        nbs->cell_nalloc = over_alloc_large(a1);
 +        srenew(nbs->cell,nbs->cell_nalloc);
 +    }
 +
 +    /* To avoid conditionals we store the moved particles at the end
 +     * of the atom index array a; make sure we have enough space.
 +     */
 +    if (nc_max*grid->na_sc + nmoved > nbs->a_nalloc)
 +    {
 +        nbs->a_nalloc = over_alloc_large(nc_max*grid->na_sc + nmoved);
 +        srenew(nbs->a,nbs->a_nalloc);
 +    }
 +
 +    /* We need padding up to a multiple of the buffer flag size: simply add */
 +    if (nc_max*grid->na_sc + NBNXN_BUFFERFLAG_SIZE > nbat->nalloc)
 +    {
 +        nbnxn_atomdata_realloc(nbat,nc_max*grid->na_sc+NBNXN_BUFFERFLAG_SIZE);
 +    }
 +
 +    calc_cell_indices(nbs,dd_zone,grid,a0,a1,atinfo,x,move,nbat);
 +
 +    if (dd_zone == 0)
 +    {
 +        nbat->natoms_local = nbat->natoms;
 +    }
 +
 +    nbs_cycle_stop(&nbs->cc[enbsCCgrid]);
 +}
 +
 +/* Calls nbnxn_put_on_grid for all non-local domains */
 +void nbnxn_put_on_grid_nonlocal(nbnxn_search_t nbs,
 +                                const gmx_domdec_zones_t *zones,
 +                                const int *atinfo,
 +                                rvec *x,
 +                                int nb_kernel_type,
 +                                nbnxn_atomdata_t *nbat)
 +{
 +    int  zone,d;
 +    rvec c0,c1;
 +
 +    for(zone=1; zone<zones->n; zone++)
 +    {
 +        for(d=0; d<DIM; d++)
 +        {
 +            c0[d] = zones->size[zone].bb_x0[d];
 +            c1[d] = zones->size[zone].bb_x1[d];
 +        }
 +
 +        nbnxn_put_on_grid(nbs,nbs->ePBC,NULL,
 +                          zone,c0,c1,
 +                          zones->cg_range[zone],
 +                          zones->cg_range[zone+1],
 +                          -1,
 +                          atinfo,
 +                          x,
 +                          0,NULL,
 +                          nb_kernel_type,
 +                          nbat);
 +    }
 +}
 +
 +/* Add simple grid type information to the local super/sub grid */
 +void nbnxn_grid_add_simple(nbnxn_search_t nbs,
 +                           nbnxn_atomdata_t *nbat)
 +{
 +    nbnxn_grid_t *grid;
 +    float *bbcz,*bb;
 +    int ncd,sc;
 +
 +    grid = &nbs->grid[0];
 +
 +    if (grid->bSimple)
 +    {
 +        gmx_incons("nbnxn_grid_simple called with a simple grid");
 +    }
 +
 +    ncd = grid->na_sc/NBNXN_CPU_CLUSTER_I_SIZE;
 +
 +    if (grid->nc*ncd > grid->nc_nalloc_simple)
 +    {
 +        grid->nc_nalloc_simple = over_alloc_large(grid->nc*ncd);
 +        srenew(grid->bbcz_simple,grid->nc_nalloc_simple*NNBSBB_D);
 +        srenew(grid->bb_simple,grid->nc_nalloc_simple*NNBSBB_B);
 +        srenew(grid->flags_simple,grid->nc_nalloc_simple);
 +        if (nbat->XFormat)
 +        {
 +            sfree_aligned(grid->bbj);
 +            snew_aligned(grid->bbj,grid->nc_nalloc_simple/2,16);
 +        }
 +    }
 +
 +    bbcz = grid->bbcz_simple;
 +    bb   = grid->bb_simple;
 +
 +#pragma omp parallel for num_threads(gmx_omp_nthreads_get(emntPairsearch)) schedule(static)
 +    for(sc=0; sc<grid->nc; sc++)
 +    {
 +        int c,tx,na;
 +
 +        for(c=0; c<ncd; c++)
 +        {
 +            tx = sc*ncd + c;
 +
 +            na = NBNXN_CPU_CLUSTER_I_SIZE;
 +            while (na > 0 &&
 +                   nbat->type[tx*NBNXN_CPU_CLUSTER_I_SIZE+na-1] == nbat->ntype-1)
 +            {
 +                na--;
 +            }
 +
 +            if (na > 0)
 +            {
 +                switch (nbat->XFormat)
 +                {
 +                case nbatX4:
 +                    /* PACK_X4==NBNXN_CPU_CLUSTER_I_SIZE, so this is simple */
 +                    calc_bounding_box_x_x4(na,nbat->x+tx*STRIDE_P4,
 +                                           bb+tx*NNBSBB_B);
 +                    break;
 +                case nbatX8:
 +                    /* PACK_X8>NBNXN_CPU_CLUSTER_I_SIZE, more complicated */
 +                    calc_bounding_box_x_x8(na,nbat->x+X8_IND_A(tx*NBNXN_CPU_CLUSTER_I_SIZE),
 +                                           bb+tx*NNBSBB_B);
 +                    break;
 +                default:
 +                    calc_bounding_box(na,nbat->xstride,
 +                                      nbat->x+tx*NBNXN_CPU_CLUSTER_I_SIZE*nbat->xstride,
 +                                      bb+tx*NNBSBB_B);
 +                    break;
 +                }
 +                bbcz[tx*NNBSBB_D+0] = bb[tx*NNBSBB_B         +ZZ];
 +                bbcz[tx*NNBSBB_D+1] = bb[tx*NNBSBB_B+NNBSBB_C+ZZ];
 +
 +                /* No interaction optimization yet here */
 +                grid->flags_simple[tx] = NBNXN_CI_DO_LJ(0) | NBNXN_CI_DO_COUL(0);
 +            }
 +            else
 +            {
 +                grid->flags_simple[tx] = 0;
 +            }
 +        }
 +    }
 +
- #ifdef NBNXN_SEARCH_SSE
++#ifdef NBNXN_SEARCH_BB_SSE
 +    if (grid->bSimple && nbat->XFormat == nbatX8)
 +    {
 +        combine_bounding_box_pairs(grid,grid->bb_simple);
 +    }
 +#endif
 +}
 +
 +void nbnxn_get_ncells(nbnxn_search_t nbs,int *ncx,int *ncy)
 +{
 +    *ncx = nbs->grid[0].ncx;
 +    *ncy = nbs->grid[0].ncy;
 +}
 +
 +void nbnxn_get_atomorder(nbnxn_search_t nbs,int **a,int *n)
 +{
 +    const nbnxn_grid_t *grid;
 +
 +    grid = &nbs->grid[0];
 +
 +    /* Return the atom order for the home cell (index 0) */
 +    *a  = nbs->a;
 +
 +    *n = grid->cxy_ind[grid->ncx*grid->ncy]*grid->na_sc;
 +}
 +
 +void nbnxn_set_atomorder(nbnxn_search_t nbs)
 +{
 +    nbnxn_grid_t *grid;
 +    int ao,cx,cy,cxy,cz,j;
 +
 +    /* Set the atom order for the home cell (index 0) */
 +    grid = &nbs->grid[0];
 +
 +    ao = 0;
 +    for(cx=0; cx<grid->ncx; cx++)
 +    {
 +        for(cy=0; cy<grid->ncy; cy++)
 +        {
 +            cxy = cx*grid->ncy + cy;
 +            j   = grid->cxy_ind[cxy]*grid->na_sc;
 +            for(cz=0; cz<grid->cxy_na[cxy]; cz++)
 +            {
 +                nbs->a[j]     = ao;
 +                nbs->cell[ao] = j;
 +                ao++;
 +                j++;
 +            }
 +        }
 +    }
 +}
 +
 +/* Determines the cell range along one dimension that
 + * the bounding box b0 - b1 sees.
 + */
 +static void get_cell_range(real b0,real b1,
 +                           int nc,real c0,real s,real invs,
 +                           real d2,real r2,int *cf,int *cl)
 +{
 +    *cf = max((int)((b0 - c0)*invs),0);
 +
 +    while (*cf > 0 && d2 + sqr((b0 - c0) - (*cf-1+1)*s) < r2)
 +    {
 +        (*cf)--;
 +    }
 +
 +    *cl = min((int)((b1 - c0)*invs),nc-1);
 +    while (*cl < nc-1 && d2 + sqr((*cl+1)*s - (b1 - c0)) < r2)
 +    {
 +        (*cl)++;
 +    }
 +}
 +
 +/* Reference code calculating the distance^2 between two bounding boxes */
 +static float box_dist2(float bx0,float bx1,float by0,
 +                       float by1,float bz0,float bz1,
 +                       const float *bb)
 +{
 +    float d2;
 +    float dl,dh,dm,dm0;
 +
 +    d2 = 0;
 +
 +    dl  = bx0 - bb[BBU_X];
 +    dh  = bb[BBL_X] - bx1;
 +    dm  = max(dl,dh);
 +    dm0 = max(dm,0);
 +    d2 += dm0*dm0;
 +
 +    dl  = by0 - bb[BBU_Y];
 +    dh  = bb[BBL_Y] - by1;
 +    dm  = max(dl,dh);
 +    dm0 = max(dm,0);
 +    d2 += dm0*dm0;
 +
 +    dl  = bz0 - bb[BBU_Z];
 +    dh  = bb[BBL_Z] - bz1;
 +    dm  = max(dl,dh);
 +    dm0 = max(dm,0);
 +    d2 += dm0*dm0;
 +
 +    return d2;
 +}
 +
 +/* Plain C code calculating the distance^2 between two bounding boxes */
 +static float subc_bb_dist2(int si,const float *bb_i_ci,
 +                           int csj,const float *bb_j_all)
 +{
 +    const float *bb_i,*bb_j;
 +    float d2;
 +    float dl,dh,dm,dm0;
 +
 +    bb_i = bb_i_ci  +  si*NNBSBB_B;
 +    bb_j = bb_j_all + csj*NNBSBB_B;
 +
 +    d2 = 0;
 +
 +    dl  = bb_i[BBL_X] - bb_j[BBU_X];
 +    dh  = bb_j[BBL_X] - bb_i[BBU_X];
 +    dm  = max(dl,dh);
 +    dm0 = max(dm,0);
 +    d2 += dm0*dm0;
 +
 +    dl  = bb_i[BBL_Y] - bb_j[BBU_Y];
 +    dh  = bb_j[BBL_Y] - bb_i[BBU_Y];
 +    dm  = max(dl,dh);
 +    dm0 = max(dm,0);
 +    d2 += dm0*dm0;
 +
 +    dl  = bb_i[BBL_Z] - bb_j[BBU_Z];
 +    dh  = bb_j[BBL_Z] - bb_i[BBU_Z];
 +    dm  = max(dl,dh);
 +    dm0 = max(dm,0);
 +    d2 += dm0*dm0;
 +
 +    return d2;
 +}
 +
-     xi_l = _mm_load_ps(bb_i+shi+0*STRIDE_8BB);   \
-     yi_l = _mm_load_ps(bb_i+shi+1*STRIDE_8BB);   \
-     zi_l = _mm_load_ps(bb_i+shi+2*STRIDE_8BB);   \
-     xi_h = _mm_load_ps(bb_i+shi+3*STRIDE_8BB);   \
-     yi_h = _mm_load_ps(bb_i+shi+4*STRIDE_8BB);   \
-     zi_h = _mm_load_ps(bb_i+shi+5*STRIDE_8BB);   \
++#ifdef NBNXN_SEARCH_BB_SSE
 +
 +/* SSE code for bb distance for bb format xyz0 */
 +static float subc_bb_dist2_sse(int na_c,
 +                              int si,const float *bb_i_ci,
 +                              int csj,const float *bb_j_all)
 +{
 +    const float *bb_i,*bb_j;
 +
 +    __m128 bb_i_SSE0,bb_i_SSE1;
 +    __m128 bb_j_SSE0,bb_j_SSE1;
 +    __m128 dl_SSE;
 +    __m128 dh_SSE;
 +    __m128 dm_SSE;
 +    __m128 dm0_SSE;
 +    __m128 d2_SSE;
 +#ifndef GMX_X86_SSE4_1
 +    float d2_array[7],*d2_align;
 +
 +    d2_align = (float *)(((size_t)(d2_array+3)) & (~((size_t)15)));
 +#else
 +    float d2;
 +#endif
 +
 +    bb_i = bb_i_ci  +  si*NNBSBB_B;
 +    bb_j = bb_j_all + csj*NNBSBB_B;
 +
 +    bb_i_SSE0 = _mm_load_ps(bb_i);
 +    bb_i_SSE1 = _mm_load_ps(bb_i+NNBSBB_C);
 +    bb_j_SSE0 = _mm_load_ps(bb_j);
 +    bb_j_SSE1 = _mm_load_ps(bb_j+NNBSBB_C);
 +
 +    dl_SSE    = _mm_sub_ps(bb_i_SSE0,bb_j_SSE1);
 +    dh_SSE    = _mm_sub_ps(bb_j_SSE0,bb_i_SSE1);
 +
 +    dm_SSE    = _mm_max_ps(dl_SSE,dh_SSE);
 +    dm0_SSE   = _mm_max_ps(dm_SSE,_mm_setzero_ps());
 +#ifndef GMX_X86_SSE4_1
 +    d2_SSE    = _mm_mul_ps(dm0_SSE,dm0_SSE);
 +
 +    _mm_store_ps(d2_align,d2_SSE);
 +
 +    return d2_align[0] + d2_align[1] + d2_align[2];
 +#else
 +    /* SSE4.1 dot product of components 0,1,2 */
 +    d2_SSE    = _mm_dp_ps(dm0_SSE,dm0_SSE,0x71);
 +
 +    _mm_store_ss(&d2,d2_SSE);
 +
 +    return d2;
 +#endif
 +}
 +
 +/* Calculate bb bounding distances of bb_i[si,...,si+3] and store them in d2 */
 +#define SUBC_BB_DIST2_SSE_XXXX_INNER(si,bb_i,d2) \
 +{                                                \
 +    int    shi;                                  \
 +                                                 \
 +    __m128 dx_0,dy_0,dz_0;                       \
 +    __m128 dx_1,dy_1,dz_1;                       \
 +                                                 \
 +    __m128 mx,my,mz;                             \
 +    __m128 m0x,m0y,m0z;                          \
 +                                                 \
 +    __m128 d2x,d2y,d2z;                          \
 +    __m128 d2s,d2t;                              \
 +                                                 \
 +    shi = si*NNBSBB_D*DIM;                       \
 +                                                 \
-     xj_l = _mm_set1_ps(bb_j[0*STRIDE_8BB]);
-     yj_l = _mm_set1_ps(bb_j[1*STRIDE_8BB]);
-     zj_l = _mm_set1_ps(bb_j[2*STRIDE_8BB]);
-     xj_h = _mm_set1_ps(bb_j[3*STRIDE_8BB]);
-     yj_h = _mm_set1_ps(bb_j[4*STRIDE_8BB]);
-     zj_h = _mm_set1_ps(bb_j[5*STRIDE_8BB]);
++    xi_l = _mm_load_ps(bb_i+shi+0*STRIDE_PBB);   \
++    yi_l = _mm_load_ps(bb_i+shi+1*STRIDE_PBB);   \
++    zi_l = _mm_load_ps(bb_i+shi+2*STRIDE_PBB);   \
++    xi_h = _mm_load_ps(bb_i+shi+3*STRIDE_PBB);   \
++    yi_h = _mm_load_ps(bb_i+shi+4*STRIDE_PBB);   \
++    zi_h = _mm_load_ps(bb_i+shi+5*STRIDE_PBB);   \
 +                                                 \
 +    dx_0 = _mm_sub_ps(xi_l,xj_h);                \
 +    dy_0 = _mm_sub_ps(yi_l,yj_h);                \
 +    dz_0 = _mm_sub_ps(zi_l,zj_h);                \
 +                                                 \
 +    dx_1 = _mm_sub_ps(xj_l,xi_h);                \
 +    dy_1 = _mm_sub_ps(yj_l,yi_h);                \
 +    dz_1 = _mm_sub_ps(zj_l,zi_h);                \
 +                                                 \
 +    mx   = _mm_max_ps(dx_0,dx_1);                \
 +    my   = _mm_max_ps(dy_0,dy_1);                \
 +    mz   = _mm_max_ps(dz_0,dz_1);                \
 +                                                 \
 +    m0x  = _mm_max_ps(mx,zero);                  \
 +    m0y  = _mm_max_ps(my,zero);                  \
 +    m0z  = _mm_max_ps(mz,zero);                  \
 +                                                 \
 +    d2x  = _mm_mul_ps(m0x,m0x);                  \
 +    d2y  = _mm_mul_ps(m0y,m0y);                  \
 +    d2z  = _mm_mul_ps(m0z,m0z);                  \
 +                                                 \
 +    d2s  = _mm_add_ps(d2x,d2y);                  \
 +    d2t  = _mm_add_ps(d2s,d2z);                  \
 +                                                 \
 +    _mm_store_ps(d2+si,d2t);                     \
 +}
 +
 +/* SSE code for nsi bb distances for bb format xxxxyyyyzzzz */
 +static void subc_bb_dist2_sse_xxxx(const float *bb_j,
 +                                   int nsi,const float *bb_i,
 +                                   float *d2)
 +{
 +    __m128 xj_l,yj_l,zj_l;
 +    __m128 xj_h,yj_h,zj_h;
 +    __m128 xi_l,yi_l,zi_l;
 +    __m128 xi_h,yi_h,zi_h;
 +
 +    __m128 zero;
 +
 +    zero = _mm_setzero_ps();
 +
-     /* Here we "loop" over si (0,STRIDE_8BB) from 0 to nsi with step STRIDE_8BB.
++    xj_l = _mm_set1_ps(bb_j[0*STRIDE_PBB]);
++    yj_l = _mm_set1_ps(bb_j[1*STRIDE_PBB]);
++    zj_l = _mm_set1_ps(bb_j[2*STRIDE_PBB]);
++    xj_h = _mm_set1_ps(bb_j[3*STRIDE_PBB]);
++    yj_h = _mm_set1_ps(bb_j[4*STRIDE_PBB]);
++    zj_h = _mm_set1_ps(bb_j[5*STRIDE_PBB]);
 +
-     if (STRIDE_8BB < nsi)
++    /* Here we "loop" over si (0,STRIDE_PBB) from 0 to nsi with step STRIDE_PBB.
 +     * But as we know the number of iterations is 1 or 2, we unroll manually.
 +     */
 +    SUBC_BB_DIST2_SSE_XXXX_INNER(0,bb_i,d2);
-         SUBC_BB_DIST2_SSE_XXXX_INNER(STRIDE_8BB,bb_i,d2);
++    if (STRIDE_PBB < nsi)
 +    {
- #endif /* NBNXN_SEARCH_SSE */
++        SUBC_BB_DIST2_SSE_XXXX_INNER(STRIDE_PBB,bb_i,d2);
 +    }
 +}
 +
-     na_c_sse = NBNXN_GPU_CLUSTER_SIZE/STRIDE_8BB;
-     ix_SSE0 = _mm_load_ps(x_i+(si*na_c_sse*DIM+0)*STRIDE_8BB);
-     iy_SSE0 = _mm_load_ps(x_i+(si*na_c_sse*DIM+1)*STRIDE_8BB);
-     iz_SSE0 = _mm_load_ps(x_i+(si*na_c_sse*DIM+2)*STRIDE_8BB);
-     ix_SSE1 = _mm_load_ps(x_i+(si*na_c_sse*DIM+3)*STRIDE_8BB);
-     iy_SSE1 = _mm_load_ps(x_i+(si*na_c_sse*DIM+4)*STRIDE_8BB);
-     iz_SSE1 = _mm_load_ps(x_i+(si*na_c_sse*DIM+5)*STRIDE_8BB);
++#endif /* NBNXN_SEARCH_BB_SSE */
 +
 +/* Plain C function which determines if any atom pair between two cells
 + * is within distance sqrt(rl2).
 + */
 +static gmx_bool subc_in_range_x(int na_c,
 +                                int si,const real *x_i,
 +                                int csj,int stride,const real *x_j,
 +                                real rl2)
 +{
 +    int  i,j,i0,j0;
 +    real d2;
 +
 +    for(i=0; i<na_c; i++)
 +    {
 +        i0 = (si*na_c + i)*DIM;
 +        for(j=0; j<na_c; j++)
 +        {
 +            j0 = (csj*na_c + j)*stride;
 +
 +            d2 = sqr(x_i[i0  ] - x_j[j0  ]) +
 +                 sqr(x_i[i0+1] - x_j[j0+1]) +
 +                 sqr(x_i[i0+2] - x_j[j0+2]);
 +
 +            if (d2 < rl2)
 +            {
 +                return TRUE;
 +            }
 +        }
 +    }
 +
 +    return FALSE;
 +}
 +
 +/* SSE function which determines if any atom pair between two cells,
 + * both with 8 atoms, is within distance sqrt(rl2).
 + */
 +static gmx_bool subc_in_range_sse8(int na_c,
 +                                   int si,const real *x_i,
 +                                   int csj,int stride,const real *x_j,
 +                                   real rl2)
 +{
 +#ifdef NBNXN_SEARCH_SSE_SINGLE
 +    __m128 ix_SSE0,iy_SSE0,iz_SSE0;
 +    __m128 ix_SSE1,iy_SSE1,iz_SSE1;
 +
 +    __m128 rc2_SSE;
 +
 +    int na_c_sse;
 +    int j0,j1;
 +
 +    rc2_SSE   = _mm_set1_ps(rl2);
 +
-     return nbl->cj4[cj_ind>>2].cj[cj_ind & 3];
++    na_c_sse = NBNXN_GPU_CLUSTER_SIZE/STRIDE_PBB;
++    ix_SSE0 = _mm_load_ps(x_i+(si*na_c_sse*DIM+0)*STRIDE_PBB);
++    iy_SSE0 = _mm_load_ps(x_i+(si*na_c_sse*DIM+1)*STRIDE_PBB);
++    iz_SSE0 = _mm_load_ps(x_i+(si*na_c_sse*DIM+2)*STRIDE_PBB);
++    ix_SSE1 = _mm_load_ps(x_i+(si*na_c_sse*DIM+3)*STRIDE_PBB);
++    iy_SSE1 = _mm_load_ps(x_i+(si*na_c_sse*DIM+4)*STRIDE_PBB);
++    iz_SSE1 = _mm_load_ps(x_i+(si*na_c_sse*DIM+5)*STRIDE_PBB);
 +
 +    /* We loop from the outer to the inner particles to maximize
 +     * the chance that we find a pair in range quickly and return.
 +     */
 +    j0 = csj*na_c;
 +    j1 = j0 + na_c - 1;
 +    while (j0 < j1)
 +    {
 +        __m128 jx0_SSE,jy0_SSE,jz0_SSE;
 +        __m128 jx1_SSE,jy1_SSE,jz1_SSE;
 +
 +        __m128 dx_SSE0,dy_SSE0,dz_SSE0;
 +        __m128 dx_SSE1,dy_SSE1,dz_SSE1;
 +        __m128 dx_SSE2,dy_SSE2,dz_SSE2;
 +        __m128 dx_SSE3,dy_SSE3,dz_SSE3;
 +
 +        __m128 rsq_SSE0;
 +        __m128 rsq_SSE1;
 +        __m128 rsq_SSE2;
 +        __m128 rsq_SSE3;
 +
 +        __m128 wco_SSE0;
 +        __m128 wco_SSE1;
 +        __m128 wco_SSE2;
 +        __m128 wco_SSE3;
 +        __m128 wco_any_SSE01,wco_any_SSE23,wco_any_SSE;
 +
 +        jx0_SSE = _mm_load1_ps(x_j+j0*stride+0);
 +        jy0_SSE = _mm_load1_ps(x_j+j0*stride+1);
 +        jz0_SSE = _mm_load1_ps(x_j+j0*stride+2);
 +
 +        jx1_SSE = _mm_load1_ps(x_j+j1*stride+0);
 +        jy1_SSE = _mm_load1_ps(x_j+j1*stride+1);
 +        jz1_SSE = _mm_load1_ps(x_j+j1*stride+2);
 +
 +        /* Calculate distance */
 +        dx_SSE0            = _mm_sub_ps(ix_SSE0,jx0_SSE);
 +        dy_SSE0            = _mm_sub_ps(iy_SSE0,jy0_SSE);
 +        dz_SSE0            = _mm_sub_ps(iz_SSE0,jz0_SSE);
 +        dx_SSE1            = _mm_sub_ps(ix_SSE1,jx0_SSE);
 +        dy_SSE1            = _mm_sub_ps(iy_SSE1,jy0_SSE);
 +        dz_SSE1            = _mm_sub_ps(iz_SSE1,jz0_SSE);
 +        dx_SSE2            = _mm_sub_ps(ix_SSE0,jx1_SSE);
 +        dy_SSE2            = _mm_sub_ps(iy_SSE0,jy1_SSE);
 +        dz_SSE2            = _mm_sub_ps(iz_SSE0,jz1_SSE);
 +        dx_SSE3            = _mm_sub_ps(ix_SSE1,jx1_SSE);
 +        dy_SSE3            = _mm_sub_ps(iy_SSE1,jy1_SSE);
 +        dz_SSE3            = _mm_sub_ps(iz_SSE1,jz1_SSE);
 +
 +        /* rsq = dx*dx+dy*dy+dz*dz */
 +        rsq_SSE0           = gmx_mm_calc_rsq_ps(dx_SSE0,dy_SSE0,dz_SSE0);
 +        rsq_SSE1           = gmx_mm_calc_rsq_ps(dx_SSE1,dy_SSE1,dz_SSE1);
 +        rsq_SSE2           = gmx_mm_calc_rsq_ps(dx_SSE2,dy_SSE2,dz_SSE2);
 +        rsq_SSE3           = gmx_mm_calc_rsq_ps(dx_SSE3,dy_SSE3,dz_SSE3);
 +
 +        wco_SSE0           = _mm_cmplt_ps(rsq_SSE0,rc2_SSE);
 +        wco_SSE1           = _mm_cmplt_ps(rsq_SSE1,rc2_SSE);
 +        wco_SSE2           = _mm_cmplt_ps(rsq_SSE2,rc2_SSE);
 +        wco_SSE3           = _mm_cmplt_ps(rsq_SSE3,rc2_SSE);
 +
 +        wco_any_SSE01      = _mm_or_ps(wco_SSE0,wco_SSE1);
 +        wco_any_SSE23      = _mm_or_ps(wco_SSE2,wco_SSE3);
 +        wco_any_SSE        = _mm_or_ps(wco_any_SSE01,wco_any_SSE23);
 +
 +        if (_mm_movemask_ps(wco_any_SSE))
 +        {
 +            return TRUE;
 +        }
 +
 +        j0++;
 +        j1--;
 +    }
 +    return FALSE;
 +
 +#else
 +    /* No SSE */
 +    gmx_incons("SSE function called without SSE support");
 +
 +    return TRUE;
 +#endif
 +}
 +
 +/* Returns the j sub-cell for index cj_ind */
 +static int nbl_cj(const nbnxn_pairlist_t *nbl,int cj_ind)
 +{
-     return nbl->cj4[cj_ind>>2].imei[0].imask;
++    return nbl->cj4[cj_ind >> NBNXN_GPU_JGROUP_SIZE_2LOG].cj[cj_ind & (NBNXN_GPU_JGROUP_SIZE - 1)];
 +}
 +
 +/* Returns the i-interaction mask of the j sub-cell for index cj_ind */
 +static unsigned nbl_imask0(const nbnxn_pairlist_t *nbl,int cj_ind)
 +{
-     ncj4_max = ((nbl->work->cj_ind + nsupercell*GPU_NSUBCELL + 4-1) >> 2);
++    return nbl->cj4[cj_ind >> NBNXN_GPU_JGROUP_SIZE_2LOG].imei[0].imask;
 +}
 +
 +/* Ensures there is enough space for 'extra' additional exclusion masks */
 +static void check_excl_space(nbnxn_pairlist_t *nbl,int extra)
 +{
 +    if (nbl->nexcl+extra > nbl->excl_nalloc)
 +    {
 +        nbl->excl_nalloc = over_alloc_small(nbl->nexcl+extra);
 +        nbnxn_realloc_void((void **)&nbl->excl,
 +                           nbl->nexcl*sizeof(*nbl->excl),
 +                           nbl->excl_nalloc*sizeof(*nbl->excl),
 +                           nbl->alloc,nbl->free);
 +    }
 +}
 +
 +/* Ensures there is enough space for ncell extra j-cells in the list */
 +static void check_subcell_list_space_simple(nbnxn_pairlist_t *nbl,
 +                                            int ncell)
 +{
 +    int cj_max;
 +
 +    cj_max = nbl->ncj + ncell;
 +
 +    if (cj_max > nbl->cj_nalloc)
 +    {
 +        nbl->cj_nalloc = over_alloc_small(cj_max);
 +        nbnxn_realloc_void((void **)&nbl->cj,
 +                           nbl->ncj*sizeof(*nbl->cj),
 +                           nbl->cj_nalloc*sizeof(*nbl->cj),
 +                           nbl->alloc,nbl->free);
 +    }
 +}
 +
 +/* Ensures there is enough space for ncell extra j-subcells in the list */
 +static void check_subcell_list_space_supersub(nbnxn_pairlist_t *nbl,
 +                                              int nsupercell)
 +{
 +    int ncj4_max,j4,j,w,t;
 +
 +#define NWARP       2
 +#define WARP_SIZE  32
 +
 +    /* We can have maximally nsupercell*GPU_NSUBCELL sj lists */
 +    /* We can store NBNXN_GPU_JGROUP_SIZE j-subcell - i-supercell pairs
 +     * in one struct. Since we round down, we need one extra entry.
 +     */
-     snew_aligned(nbl->work->bb_ci,GPU_NSUBCELL/STRIDE_8BB*NNBSBB_XXXX,32);
++    ncj4_max = ((nbl->work->cj_ind + nsupercell*GPU_NSUBCELL + NBNXN_GPU_JGROUP_SIZE - 1) >> NBNXN_GPU_JGROUP_SIZE_2LOG);
 +
 +    if (ncj4_max > nbl->cj4_nalloc)
 +    {
 +        nbl->cj4_nalloc = over_alloc_small(ncj4_max);
 +        nbnxn_realloc_void((void **)&nbl->cj4,
 +                           nbl->work->cj4_init*sizeof(*nbl->cj4),
 +                           nbl->cj4_nalloc*sizeof(*nbl->cj4),
 +                           nbl->alloc,nbl->free);
 +    }
 +
 +    if (ncj4_max > nbl->work->cj4_init)
 +    {
 +        for(j4=nbl->work->cj4_init; j4<ncj4_max; j4++)
 +        {
 +            /* No i-subcells and no excl's in the list initially */
 +            for(w=0; w<NWARP; w++)
 +            {
 +                nbl->cj4[j4].imei[w].imask    = 0U;
 +                nbl->cj4[j4].imei[w].excl_ind = 0;
 +
 +            }
 +        }
 +        nbl->work->cj4_init = ncj4_max;
 +    }
 +}
 +
 +/* Set all excl masks for one GPU warp to no exclusions (all bits on) */
 +static void set_no_excls(nbnxn_excl_t *excl)
 +{
 +    int t;
 +
 +    for(t=0; t<WARP_SIZE; t++)
 +    {
 +        /* Turn all interaction bits on */
 +        excl->pair[t] = NBNXN_INT_MASK_ALL;
 +    }
 +}
 +
 +/* Initializes a single nbnxn_pairlist_t data structure */
 +static void nbnxn_init_pairlist(nbnxn_pairlist_t *nbl,
 +                                gmx_bool bSimple,
 +                                nbnxn_alloc_t *alloc,
 +                                nbnxn_free_t  *free)
 +{
 +    if (alloc == NULL)
 +    {
 +        nbl->alloc = nbnxn_alloc_aligned;
 +    }
 +    else
 +    {
 +        nbl->alloc = alloc;
 +    }
 +    if (free == NULL)
 +    {
 +        nbl->free = nbnxn_free_aligned;
 +    }
 +    else
 +    {
 +        nbl->free = free;
 +    }
 +
 +    nbl->bSimple     = bSimple;
 +    nbl->na_sc       = 0;
 +    nbl->na_ci       = 0;
 +    nbl->na_cj       = 0;
 +    nbl->nci         = 0;
 +    nbl->ci          = NULL;
 +    nbl->ci_nalloc   = 0;
 +    nbl->ncj         = 0;
 +    nbl->cj          = NULL;
 +    nbl->cj_nalloc   = 0;
 +    nbl->ncj4        = 0;
 +    /* NOTE(review): we need one extra element in cj4, but cj4_nalloc starts
 +     * at 0 here (allocation is deferred) - comment and code disagree; verify.
 +     */
 +    nbl->cj4_nalloc  = 0;
 +    nbl->cj4         = NULL;
 +    nbl->nci_tot     = 0;
 +
 +    if (!nbl->bSimple)
 +    {
 +        nbl->excl        = NULL;
 +        nbl->excl_nalloc = 0;
 +        nbl->nexcl       = 0;
 +        check_excl_space(nbl,1);
 +        nbl->nexcl       = 1;
 +        set_no_excls(&nbl->excl[0]);
 +    }
 +
 +    snew(nbl->work,1);
 +#ifdef NBNXN_BBXXXX
-     snew_aligned(nbl->work->bb_ci,GPU_NSUBCELL*NNBSBB_B,32);
++    snew_aligned(nbl->work->bb_ci,GPU_NSUBCELL/STRIDE_PBB*NNBSBB_XXXX,NBNXN_MEM_ALIGN);
 +#else
-     snew_aligned(nbl->work->x_ci,NBNXN_NA_SC_MAX*DIM,32);
++    snew_aligned(nbl->work->bb_ci,GPU_NSUBCELL*NNBSBB_B,NBNXN_MEM_ALIGN);
 +#endif
-     snew_aligned(nbl->work->x_ci_simd_4xn,1,32);
-     snew_aligned(nbl->work->x_ci_simd_2xnn,1,32);
++    snew_aligned(nbl->work->x_ci,NBNXN_NA_SC_MAX*DIM,NBNXN_MEM_ALIGN);
 +#ifdef GMX_NBNXN_SIMD
-     snew_aligned(nbl->work->d2,GPU_NSUBCELL,32);
++    snew_aligned(nbl->work->x_ci_simd_4xn,1,NBNXN_MEM_ALIGN);
++    snew_aligned(nbl->work->x_ci_simd_2xnn,1,NBNXN_MEM_ALIGN);
 +#endif
-             nbl->nci_tot/(0.25*nbl->ncj4));
++    snew_aligned(nbl->work->d2,GPU_NSUBCELL,NBNXN_MEM_ALIGN);
 +}
 +
 +void nbnxn_init_pairlist_set(nbnxn_pairlist_set_t *nbl_list,
 +                             gmx_bool bSimple, gmx_bool bCombined,
 +                             nbnxn_alloc_t *alloc,
 +                             nbnxn_free_t  *free)
 +{
 +    int i;
 +
 +    nbl_list->bSimple   = bSimple;
 +    nbl_list->bCombined = bCombined;
 +
 +    nbl_list->nnbl = gmx_omp_nthreads_get(emntNonbonded);
 +
 +    if (!nbl_list->bCombined &&
 +        nbl_list->nnbl > NBNXN_BUFFERFLAG_MAX_THREADS)
 +    {
 +        gmx_fatal(FARGS,"%d OpenMP threads were requested. Since the non-bonded force buffer reduction is prohibitively slow with more than %d threads, we do not allow this. Use %d or less OpenMP threads.",
 +                  nbl_list->nnbl,NBNXN_BUFFERFLAG_MAX_THREADS,NBNXN_BUFFERFLAG_MAX_THREADS);
 +    }
 +
 +    snew(nbl_list->nbl,nbl_list->nnbl);
 +    /* Execute in order to avoid memory interleaving between threads */
 +#pragma omp parallel for num_threads(nbl_list->nnbl) schedule(static)
 +    for(i=0; i<nbl_list->nnbl; i++)
 +    {
 +        /* Allocate the nblist data structure locally on each thread
 +         * to optimize memory access for NUMA architectures.
 +         */
 +        snew(nbl_list->nbl[i],1);
 +
 +        /* Only list 0 is used on the GPU, use normal allocation for i>0 */
 +        if (i == 0)
 +        {
 +            nbnxn_init_pairlist(nbl_list->nbl[i],nbl_list->bSimple,alloc,free);
 +        }
 +        else
 +        {
 +            nbnxn_init_pairlist(nbl_list->nbl[i],nbl_list->bSimple,NULL,NULL);
 +        }
 +    }
 +}
 +
 +/* Print statistics of a pair list, used for debug output */
 +static void print_nblist_statistics_simple(FILE *fp,const nbnxn_pairlist_t *nbl,
 +                                           const nbnxn_search_t nbs,real rl)
 +{
 +    const nbnxn_grid_t *grid;
 +    int cs[SHIFTS];
 +    int s,i,j;
 +    int npexcl;
 +
 +    /* This code only produces correct statistics with domain decomposition */
 +    grid = &nbs->grid[0];
 +
 +    fprintf(fp,"nbl nci %d ncj %d\n",
 +            nbl->nci,nbl->ncj);
 +    fprintf(fp,"nbl na_sc %d rl %g ncp %d per cell %.1f atoms %.1f ratio %.2f\n",
 +            nbl->na_sc,rl,nbl->ncj,nbl->ncj/(double)grid->nc,
 +            nbl->ncj/(double)grid->nc*grid->na_sc,
 +            nbl->ncj/(double)grid->nc*grid->na_sc/(0.5*4.0/3.0*M_PI*rl*rl*rl*grid->nc*grid->na_sc/det(nbs->box)));
 +
 +    fprintf(fp,"nbl average j cell list length %.1f\n",
 +            0.25*nbl->ncj/(double)nbl->nci);
 +
 +    for(s=0; s<SHIFTS; s++)
 +    {
 +        cs[s] = 0;
 +    }
 +    npexcl = 0;
 +    for(i=0; i<nbl->nci; i++)
 +    {
 +        cs[nbl->ci[i].shift & NBNXN_CI_SHIFT] +=
 +            nbl->ci[i].cj_ind_end - nbl->ci[i].cj_ind_start;
 +
 +        j = nbl->ci[i].cj_ind_start;
 +        while (j < nbl->ci[i].cj_ind_end &&
 +               nbl->cj[j].excl != NBNXN_INT_MASK_ALL)
 +        {
 +            npexcl++;
 +            j++;
 +        }
 +    }
 +    fprintf(fp,"nbl cell pairs, total: %d excl: %d %.1f%%\n",
 +            nbl->ncj,npexcl,100*npexcl/(double)nbl->ncj);
 +    for(s=0; s<SHIFTS; s++)
 +    {
 +        if (cs[s] > 0)
 +        {
 +            fprintf(fp,"nbl shift %2d ncj %3d\n",s,cs[s]);
 +        }
 +    }
 +}
 +
 +/* Print statistics of a pair lists, used for debug output */
 +static void print_nblist_statistics_supersub(FILE *fp,const nbnxn_pairlist_t *nbl,
 +                                             const nbnxn_search_t nbs,real rl)
 +{
 +    const nbnxn_grid_t *grid;
 +    int i,j4,j,si,b;
 +    int c[GPU_NSUBCELL+1];
 +
 +    /* This code only produces correct statistics with domain decomposition */
 +    grid = &nbs->grid[0];
 +
 +    fprintf(fp,"nbl nsci %d ncj4 %d nsi %d excl4 %d\n",
 +            nbl->nsci,nbl->ncj4,nbl->nci_tot,nbl->nexcl);
 +    fprintf(fp,"nbl na_c %d rl %g ncp %d per cell %.1f atoms %.1f ratio %.2f\n",
 +            nbl->na_ci,rl,nbl->nci_tot,nbl->nci_tot/(double)grid->nsubc_tot,
 +            nbl->nci_tot/(double)grid->nsubc_tot*grid->na_c,
 +            nbl->nci_tot/(double)grid->nsubc_tot*grid->na_c/(0.5*4.0/3.0*M_PI*rl*rl*rl*grid->nsubc_tot*grid->na_c/det(nbs->box)));
 +
 +    fprintf(fp,"nbl average j super cell list length %.1f\n",
 +            0.25*nbl->ncj4/(double)nbl->nsci);
 +    fprintf(fp,"nbl average i sub cell list length %.1f\n",
-             for(j=0; j<4; j++)
++            nbl->nci_tot/((double)nbl->ncj4));
 +
 +    for(si=0; si<=GPU_NSUBCELL; si++)
 +    {
 +        c[si] = 0;
 +    }
 +    for(i=0; i<nbl->nsci; i++)
 +    {
 +        for(j4=nbl->sci[i].cj4_ind_start; j4<nbl->sci[i].cj4_ind_end; j4++)
 +        {
-             excl[w]->pair[(ej&(4-1))*nbl->na_ci+ei] &=
-                 ~(1U << (sj_offset*GPU_NSUBCELL+si));
++            for(j=0; j<NBNXN_GPU_JGROUP_SIZE; j++)
 +            {
 +                b = 0;
 +                for(si=0; si<GPU_NSUBCELL; si++)
 +                {
 +                    if (nbl->cj4[j4].imei[0].imask & (1U << (j*GPU_NSUBCELL + si)))
 +                    {
 +                        b++;
 +                    }
 +                }
 +                c[b]++;
 +            }
 +        }
 +    }
 +    for(b=0; b<=GPU_NSUBCELL; b++)
 +    {
 +        fprintf(fp,"nbl j-list #i-subcell %d %7d %4.1f\n",
 +                b,c[b],100.0*c[b]/(double)(nbl->ncj4*NBNXN_GPU_JGROUP_SIZE));
 +    }
 +}
 +
 +/* Print the full pair list, used for debug output */
 +static void print_supersub_nsp(const char *fn,
 +                               const nbnxn_pairlist_t *nbl,
 +                               int iloc)
 +{
 +    char buf[STRLEN];
 +    FILE *fp;
 +    int i,nsp,j4,p;
 +
 +    sprintf(buf,"%s_%s.xvg",fn,NONLOCAL_I(iloc) ? "nl" : "l");
 +    fp = ffopen(buf,"w");
 +
 +    for(i=0; i<nbl->nci; i++)
 +    {
 +        nsp = 0;
 +        for(j4=nbl->sci[i].cj4_ind_start; j4<nbl->sci[i].cj4_ind_end; j4++)
 +        {
 +            for(p=0; p<NBNXN_GPU_JGROUP_SIZE*GPU_NSUBCELL; p++)
 +            {
 +                nsp += (nbl->cj4[j4].imei[0].imask >> p) & 1;
 +            }
 +        }
 +        fprintf(fp,"%4d %3d %3d\n",
 +                i,
 +                nsp,
 +                nbl->sci[i].cj4_ind_end-nbl->sci[i].cj4_ind_start);
 +    }
 +
 +    fclose(fp);
 +}
 +
 +/* Returns a pointer to the exclusion mask for cj4-unit cj4, warp warp */
 +static void low_get_nbl_exclusions(nbnxn_pairlist_t *nbl,int cj4,
 +                                   int warp,nbnxn_excl_t **excl)
 +{
 +    if (nbl->cj4[cj4].imei[warp].excl_ind == 0)
 +    {
 +        /* No exclusions set, make a new list entry */
 +        nbl->cj4[cj4].imei[warp].excl_ind = nbl->nexcl;
 +        nbl->nexcl++;
 +        *excl = &nbl->excl[nbl->cj4[cj4].imei[warp].excl_ind];
 +        set_no_excls(*excl);
 +    }
 +    else
 +    {
 +        /* We already have some exclusions, new ones can be added to the list */
 +        *excl = &nbl->excl[nbl->cj4[cj4].imei[warp].excl_ind];
 +    }
 +}
 +
 +/* Returns a pointer to the exclusion mask for cj4-unit cj4, warp warp,
 + * allocates extra memory, if necessary.
 + */
 +static void get_nbl_exclusions_1(nbnxn_pairlist_t *nbl,int cj4,
 +                                 int warp,nbnxn_excl_t **excl)
 +{
 +    if (nbl->cj4[cj4].imei[warp].excl_ind == 0)
 +    {
 +        /* We need to make a new list entry, check if we have space */
 +        check_excl_space(nbl,1);
 +    }
 +    low_get_nbl_exclusions(nbl,cj4,warp,excl);
 +}
 +
 +/* Returns pointers to the exclusion mask for cj4-unit cj4 for both warps,
 + * allocates extra memory, if necessary.
 + */
 +static void get_nbl_exclusions_2(nbnxn_pairlist_t *nbl,int cj4,
 +                                 nbnxn_excl_t **excl_w0,
 +                                 nbnxn_excl_t **excl_w1)
 +{
 +    /* Check for space we might need */
 +    check_excl_space(nbl,2);
 +
 +    low_get_nbl_exclusions(nbl,cj4,0,excl_w0);
 +    low_get_nbl_exclusions(nbl,cj4,1,excl_w1);
 +}
 +
 +/* Sets the self exclusions i=j and pair exclusions i>j */
 +static void set_self_and_newton_excls_supersub(nbnxn_pairlist_t *nbl,
 +                                               int cj4_ind,int sj_offset,
 +                                               int si)
 +{
 +    nbnxn_excl_t *excl[2];
 +    int  ei,ej,w;
 +
 +    /* Here we only set the set self and double pair exclusions */
 +
 +    get_nbl_exclusions_2(nbl,cj4_ind,&excl[0],&excl[1]);
 +
 +    /* Only minor < major bits set */
 +    for(ej=0; ej<nbl->na_ci; ej++)
 +    {
 +        w = (ej>>2);
 +        for(ei=ej; ei<nbl->na_ci; ei++)
 +        {
-         cj4_ind   = (nbl->work->cj_ind >> 2);
++            excl[w]->pair[(ej & (NBNXN_GPU_JGROUP_SIZE-1))*nbl->na_ci + ei] &=
++                ~(1U << (sj_offset*GPU_NSUBCELL + si));
 +        }
 +    }
 +}
 +
 +/* Returns a diagonal or off-diagonal interaction mask for plain C lists */
 +static unsigned int get_imask(gmx_bool rdiag,int ci,int cj)
 +{
 +    return (rdiag && ci == cj ? NBNXN_INT_MASK_DIAG : NBNXN_INT_MASK_ALL);
 +}
 +
 +/* Returns a diagonal or off-diagonal interaction mask for SIMD128 lists */
 +static unsigned int get_imask_x86_simd128(gmx_bool rdiag,int ci,int cj)
 +{
 +#ifndef GMX_DOUBLE /* cj-size = 4 */
 +    return (rdiag && ci == cj ? NBNXN_INT_MASK_DIAG : NBNXN_INT_MASK_ALL);
 +#else              /* cj-size = 2 */
 +    return (rdiag && ci*2 == cj ? NBNXN_INT_MASK_DIAG_J2_0 :
 +            (rdiag && ci*2+1 == cj ? NBNXN_INT_MASK_DIAG_J2_1 :
 +             NBNXN_INT_MASK_ALL));
 +#endif
 +}
 +
 +/* Returns a diagonal or off-diagonal interaction mask for SIMD256 lists */
 +static unsigned int get_imask_x86_simd256(gmx_bool rdiag,int ci,int cj)
 +{
 +#ifndef GMX_DOUBLE /* cj-size = 8 */
 +    return (rdiag && ci == cj*2 ? NBNXN_INT_MASK_DIAG_J8_0 :
 +            (rdiag && ci == cj*2+1 ? NBNXN_INT_MASK_DIAG_J8_1 :
 +             NBNXN_INT_MASK_ALL));
 +#else              /* cj-size = 4 */
 +    return (rdiag && ci == cj ? NBNXN_INT_MASK_DIAG : NBNXN_INT_MASK_ALL);
 +#endif
 +}
 +
 +#ifdef GMX_NBNXN_SIMD
 +#if GMX_NBNXN_SIMD_BITWIDTH == 128
 +#define get_imask_x86_simd_4xn  get_imask_x86_simd128
 +#else
 +#if GMX_NBNXN_SIMD_BITWIDTH == 256
 +#define get_imask_x86_simd_4xn  get_imask_x86_simd256
 +#define get_imask_x86_simd_2xnn get_imask_x86_simd128
 +#else
 +#error "unsupported GMX_NBNXN_SIMD_BITWIDTH"
 +#endif
 +#endif
 +#endif
 +
 +/* Plain C code for making a pair list of cell ci vs cell cjf-cjl.
 + * Checks bounding box distances and possibly atom pair distances.
 + */
 +static void make_cluster_list_simple(const nbnxn_grid_t *gridj,
 +                                     nbnxn_pairlist_t *nbl,
 +                                     int ci,int cjf,int cjl,
 +                                     gmx_bool remove_sub_diag,
 +                                     const real *x_j,
 +                                     real rl2,float rbb2,
 +                                     int *ndistc)
 +{
 +    const nbnxn_list_work_t *work;
 +
 +    const float *bb_ci;
 +    const real  *x_ci;
 +
 +    gmx_bool   InRange;
 +    real       d2;
 +    int        cjf_gl,cjl_gl,cj;
 +
 +    work = nbl->work;
 +
 +    bb_ci = nbl->work->bb_ci;
 +    x_ci  = nbl->work->x_ci;
 +
 +    InRange = FALSE;
 +    while (!InRange && cjf <= cjl)
 +    {
 +        d2 = subc_bb_dist2(0,bb_ci,cjf,gridj->bb);
 +        *ndistc += 2;
 +
 +        /* Check if the distance is within the distance where
 +         * we use only the bounding box distance rbb,
 +         * or within the cut-off and there is at least one atom pair
 +         * within the cut-off.
 +         */
 +        if (d2 < rbb2)
 +        {
 +            InRange = TRUE;
 +        }
 +        else if (d2 < rl2)
 +        {
 +            int i,j;
 +
 +            cjf_gl = gridj->cell0 + cjf;
 +            for(i=0; i<NBNXN_CPU_CLUSTER_I_SIZE && !InRange; i++)
 +            {
 +                for(j=0; j<NBNXN_CPU_CLUSTER_I_SIZE; j++)
 +                {
 +                    InRange = InRange ||
 +                        (sqr(x_ci[i*STRIDE_XYZ+XX] - x_j[(cjf_gl*NBNXN_CPU_CLUSTER_I_SIZE+j)*STRIDE_XYZ+XX]) +
 +                         sqr(x_ci[i*STRIDE_XYZ+YY] - x_j[(cjf_gl*NBNXN_CPU_CLUSTER_I_SIZE+j)*STRIDE_XYZ+YY]) +
 +                         sqr(x_ci[i*STRIDE_XYZ+ZZ] - x_j[(cjf_gl*NBNXN_CPU_CLUSTER_I_SIZE+j)*STRIDE_XYZ+ZZ]) < rl2);
 +                }
 +            }
 +            *ndistc += NBNXN_CPU_CLUSTER_I_SIZE*NBNXN_CPU_CLUSTER_I_SIZE;
 +        }
 +        if (!InRange)
 +        {
 +            cjf++;
 +        }
 +    }
 +    if (!InRange)
 +    {
 +        return;
 +    }
 +
 +    InRange = FALSE;
 +    while (!InRange && cjl > cjf)
 +    {
 +        d2 = subc_bb_dist2(0,bb_ci,cjl,gridj->bb);
 +        *ndistc += 2;
 +
 +        /* Check if the distance is within the distance where
 +         * we use only the bounding box distance rbb,
 +         * or within the cut-off and there is at least one atom pair
 +         * within the cut-off.
 +         */
 +        if (d2 < rbb2)
 +        {
 +            InRange = TRUE;
 +        }
 +        else if (d2 < rl2)
 +        {
 +            int i,j;
 +
 +            cjl_gl = gridj->cell0 + cjl;
 +            for(i=0; i<NBNXN_CPU_CLUSTER_I_SIZE && !InRange; i++)
 +            {
 +                for(j=0; j<NBNXN_CPU_CLUSTER_I_SIZE; j++)
 +                {
 +                    InRange = InRange ||
 +                        (sqr(x_ci[i*STRIDE_XYZ+XX] - x_j[(cjl_gl*NBNXN_CPU_CLUSTER_I_SIZE+j)*STRIDE_XYZ+XX]) +
 +                         sqr(x_ci[i*STRIDE_XYZ+YY] - x_j[(cjl_gl*NBNXN_CPU_CLUSTER_I_SIZE+j)*STRIDE_XYZ+YY]) +
 +                         sqr(x_ci[i*STRIDE_XYZ+ZZ] - x_j[(cjl_gl*NBNXN_CPU_CLUSTER_I_SIZE+j)*STRIDE_XYZ+ZZ]) < rl2);
 +                }
 +            }
 +            *ndistc += NBNXN_CPU_CLUSTER_I_SIZE*NBNXN_CPU_CLUSTER_I_SIZE;
 +        }
 +        if (!InRange)
 +        {
 +            cjl--;
 +        }
 +    }
 +
 +    if (cjf <= cjl)
 +    {
 +        for(cj=cjf; cj<=cjl; cj++)
 +        {
 +            /* Store cj and the interaction mask */
 +            nbl->cj[nbl->ncj].cj   = gridj->cell0 + cj;
 +            nbl->cj[nbl->ncj].excl = get_imask(remove_sub_diag,ci,cj);
 +            nbl->ncj++;
 +        }
 +        /* Increase the closing index in i super-cell list */
 +        nbl->ci[nbl->nci].cj_ind_end = nbl->ncj;
 +    }
 +}
 +
 +#ifdef GMX_NBNXN_SIMD_4XN
 +#include "nbnxn_search_simd_4xn.h"
 +#endif
 +#ifdef GMX_NBNXN_SIMD_2XNN
 +#include "nbnxn_search_simd_2xnn.h"
 +#endif
 +
 +/* Plain C or SSE code for making a pair list of super-cell sci vs scj.
 + * Checks bounding box distances and possibly atom pair distances.
 + */
 +static void make_cluster_list_supersub(const nbnxn_search_t nbs,
 +                                       const nbnxn_grid_t *gridi,
 +                                       const nbnxn_grid_t *gridj,
 +                                       nbnxn_pairlist_t *nbl,
 +                                       int sci,int scj,
 +                                       gmx_bool sci_equals_scj,
 +                                       int stride,const real *x,
 +                                       real rl2,float rbb2,
 +                                       int *ndistc)
 +{
 +    int  na_c;
 +    int  npair;
 +    int  cjo,ci1,ci,cj,cj_gl;
 +    int  cj4_ind,cj_offset;
 +    unsigned imask;
 +    nbnxn_cj4_t *cj4;
 +    const float *bb_ci;
 +    const real *x_ci;
 +    float *d2l,d2;
 +    int  w;
 +#define PRUNE_LIST_CPU_ONE
 +#ifdef PRUNE_LIST_CPU_ONE
 +    int  ci_last=-1;
 +#endif
 +
 +    d2l = nbl->work->d2;
 +
 +    bb_ci = nbl->work->bb_ci;
 +    x_ci  = nbl->work->x_ci;
 +
 +    na_c = gridj->na_c;
 +
 +    for(cjo=0; cjo<gridj->nsubc[scj]; cjo++)
 +    {
-         subc_bb_dist2_sse_xxxx(gridj->bb+(cj>>STRIDE_8BB_2LOG)*NNBSBB_XXXX+(cj & (STRIDE_8BB-1)),
++        cj4_ind   = (nbl->work->cj_ind >> NBNXN_GPU_JGROUP_SIZE_2LOG);
 +        cj_offset = nbl->work->cj_ind - cj4_ind*NBNXN_GPU_JGROUP_SIZE;
 +        cj4       = &nbl->cj4[cj4_ind];
 +
 +        cj = scj*GPU_NSUBCELL + cjo;
 +
 +        cj_gl = gridj->cell0*GPU_NSUBCELL + cj;
 +
 +        /* Initialize this j-subcell i-subcell list */
 +        cj4->cj[cj_offset] = cj_gl;
 +        imask              = 0;
 +
 +        if (sci_equals_scj)
 +        {
 +            ci1 = cjo + 1;
 +        }
 +        else
 +        {
 +            ci1 = gridi->nsubc[sci];
 +        }
 +
 +#ifdef NBNXN_BBXXXX
 +        /* Determine all ci1 bb distances in one call with SSE */
- #ifdef NBNXN_8BB_SSE
++        subc_bb_dist2_sse_xxxx(gridj->bb+(cj>>STRIDE_PBB_2LOG)*NNBSBB_XXXX+(cj & (STRIDE_PBB-1)),
 +                               ci1,bb_ci,d2l);
 +        *ndistc += na_c*2;
 +#endif
 +
 +        npair = 0;
 +        /* We use a fixed upper-bound instead of ci1 to help optimization */
 +        for(ci=0; ci<GPU_NSUBCELL; ci++)
 +        {
 +            if (ci == ci1)
 +            {
 +                break;
 +            }
 +
 +#ifndef NBNXN_BBXXXX
 +            /* Determine the bb distance between ci and cj */
 +            d2l[ci] = subc_bb_dist2(ci,bb_ci,cj,gridj->bb);
 +            *ndistc += 2;
 +#endif
 +            d2 = d2l[ci];
 +
 +#ifdef PRUNE_LIST_CPU_ALL
 +            /* Check if the distance is within the distance where
 +             * we use only the bounding box distance rbb,
 +             * or within the cut-off and there is at least one atom pair
 +             * within the cut-off. This check is very costly.
 +             */
 +            *ndistc += na_c*na_c;
 +            if (d2 < rbb2 ||
 +                (d2 < rl2 && subc_in_range_x(na_c,ci,x_ci,cj_gl,stride,x,rl2)))
 +#else
 +            /* Check if the distance between the two bounding boxes
 +             * in within the pair-list cut-off.
 +             */
 +            if (d2 < rl2)
 +#endif
 +            {
 +                /* Flag this i-subcell to be taken into account */
 +                imask |= (1U << (cj_offset*GPU_NSUBCELL+ci));
 +
 +#ifdef PRUNE_LIST_CPU_ONE
 +                ci_last = ci;
 +#endif
 +
 +                npair++;
 +            }
 +        }
 +
 +#ifdef PRUNE_LIST_CPU_ONE
 +        /* If we only found 1 pair, check if any atoms are actually
 +         * within the cut-off, so we could get rid of it.
 +         */
 +        if (npair == 1 && d2l[ci_last] >= rbb2)
 +        {
 +            /* Avoid using function pointers here, as it's slower */
 +            if (
-             nbl->sci[nbl->nsci].cj4_ind_end = ((nbl->work->cj_ind+4-1)>>2);
++#ifdef NBNXN_PBB_SSE
 +                !subc_in_range_sse8
 +#else
 +                !subc_in_range_x
 +#endif
 +                                (na_c,ci_last,x_ci,cj_gl,stride,x,rl2))
 +            {
 +                imask &= ~(1U << (cj_offset*GPU_NSUBCELL+ci_last));
 +                npair--;
 +            }
 +        }
 +#endif
 +
 +        if (npair > 0)
 +        {
 +            /* We have a useful sj entry, close it now */
 +
 +            /* Set the exclucions for the ci== sj entry.
 +             * Here we don't bother to check if this entry is actually flagged,
 +             * as it will nearly always be in the list.
 +             */
 +            if (sci_equals_scj)
 +            {
 +                set_self_and_newton_excls_supersub(nbl,cj4_ind,cj_offset,cjo);
 +            }
 +
 +            /* Copy the cluster interaction mask to the list */
 +            for(w=0; w<NWARP; w++)
 +            {
 +                cj4->imei[w].imask |= imask;
 +            }
 +
 +            nbl->work->cj_ind++;
 +
 +            /* Keep the count */
 +            nbl->nci_tot += npair;
 +
 +            /* Increase the closing index in i super-cell list */
- #ifdef NBNXN_SEARCH_SSE
++            nbl->sci[nbl->nsci].cj4_ind_end =
++                ((nbl->work->cj_ind+NBNXN_GPU_JGROUP_SIZE-1) >> NBNXN_GPU_JGROUP_SIZE_2LOG);
 +        }
 +    }
 +}
 +
 +/* Set all atom-pair exclusions from the topology stored in excl
 + * as masks in the pair-list for simple list i-entry nbl_ci
 + */
 +static void set_ci_top_excls(const nbnxn_search_t nbs,
 +                             nbnxn_pairlist_t *nbl,
 +                             gmx_bool diagRemoved,
 +                             int na_ci_2log,
 +                             int na_cj_2log,
 +                             const nbnxn_ci_t *nbl_ci,
 +                             const t_blocka *excl)
 +{
 +    const int *cell;
 +    int ci;
 +    int cj_ind_first,cj_ind_last;
 +    int cj_first,cj_last;
 +    int ndirect;
 +    int i,ai,aj,si,eind,ge,se;
 +    int found,cj_ind_0,cj_ind_1,cj_ind_m;
 +    int cj_m;
 +    gmx_bool Found_si;
 +    int si_ind;
 +    nbnxn_excl_t *nbl_excl;
 +    int inner_i,inner_e;
 +
 +    cell = nbs->cell;
 +
 +    if (nbl_ci->cj_ind_end == nbl_ci->cj_ind_start)
 +    {
 +        /* Empty list */
 +        return;
 +    }
 +
 +    ci = nbl_ci->ci;
 +
 +    cj_ind_first = nbl_ci->cj_ind_start;
 +    cj_ind_last  = nbl->ncj - 1;
 +
 +    cj_first = nbl->cj[cj_ind_first].cj;
 +    cj_last  = nbl->cj[cj_ind_last].cj;
 +
 +    /* Determine how many contiguous j-cells we have starting
 +     * from the first i-cell. This number can be used to directly
 +     * calculate j-cell indices for excluded atoms.
 +     */
 +    ndirect = 0;
 +    if (na_ci_2log == na_cj_2log)
 +    {
 +        while (cj_ind_first + ndirect <= cj_ind_last &&
 +               nbl->cj[cj_ind_first+ndirect].cj == ci + ndirect)
 +        {
 +            ndirect++;
 +        }
 +    }
- #define AMODI(a)  ((a) & (NBNXN_CPU_CLUSTER_I_SIZE - 1))
++#ifdef NBNXN_SEARCH_BB_SSE
 +    else
 +    {
 +        while (cj_ind_first + ndirect <= cj_ind_last &&
 +               nbl->cj[cj_ind_first+ndirect].cj == ci_to_cj(na_cj_2log,ci) + ndirect)
 +        {
 +            ndirect++;
 +        }
 +    }
 +#endif
 +
 +    /* Loop over the atoms in the i super-cell */
 +    for(i=0; i<nbl->na_sc; i++)
 +    {
 +        ai = nbs->a[ci*nbl->na_sc+i];
 +        if (ai >= 0)
 +        {
 +            si  = (i>>na_ci_2log);
 +
 +            /* Loop over the topology-based exclusions for this i-atom */
 +            for(eind=excl->index[ai]; eind<excl->index[ai+1]; eind++)
 +            {
 +                aj = excl->a[eind];
 +
 +                if (aj == ai)
 +                {
 +                    /* The self exclusion are already set, save some time */
 +                    continue;
 +                }
 +
 +                ge = cell[aj];
 +
 +                /* Without shifts we only calculate interactions j>i
 +                 * for one-way pair-lists.
 +                 */
 +                if (diagRemoved && ge <= ci*nbl->na_sc + i)
 +                {
 +                    continue;
 +                }
 +
 +                se = (ge >> na_cj_2log);
 +
 +                /* Could the cluster se be in our list? */
 +                if (se >= cj_first && se <= cj_last)
 +                {
 +                    if (se < cj_first + ndirect)
 +                    {
 +                        /* We can calculate cj_ind directly from se */
 +                        found = cj_ind_first + se - cj_first;
 +                    }
 +                    else
 +                    {
 +                        /* Search for se using bisection */
 +                        found = -1;
 +                        cj_ind_0 = cj_ind_first + ndirect;
 +                        cj_ind_1 = cj_ind_last + 1;
 +                        while (found == -1 && cj_ind_0 < cj_ind_1)
 +                        {
 +                            cj_ind_m = (cj_ind_0 + cj_ind_1)>>1;
 +
 +                            cj_m = nbl->cj[cj_ind_m].cj;
 +
 +                            if (se == cj_m)
 +                            {
 +                                found = cj_ind_m;
 +                            }
 +                            else if (se < cj_m)
 +                            {
 +                                cj_ind_1 = cj_ind_m;
 +                            }
 +                            else
 +                            {
 +                                cj_ind_0 = cj_ind_m + 1;
 +                            }
 +                        }
 +                    }
 +
 +                    if (found >= 0)
 +                    {
 +                        inner_i = i  - (si << na_ci_2log);
 +                        inner_e = ge - (se << na_cj_2log);
 +
 +                        nbl->cj[found].excl &= ~(1U<<((inner_i<<na_cj_2log) + inner_e));
 +                    }
 +                }
 +            }
 +        }
 +    }
 +}
 +
 +/* Set all atom-pair exclusions from the topology stored in excl
 + * as masks in the pair-list for i-super-cell entry nbl_sci
 + */
 +static void set_sci_top_excls(const nbnxn_search_t nbs,
 +                              nbnxn_pairlist_t *nbl,
 +                              gmx_bool diagRemoved,
 +                              int na_c_2log,
 +                              const nbnxn_sci_t *nbl_sci,
 +                              const t_blocka *excl)
 +{
 +    const int *cell;
 +    int na_c;
 +    int sci;
 +    int cj_ind_first,cj_ind_last;
 +    int cj_first,cj_last;
 +    int ndirect;
 +    int i,ai,aj,si,eind,ge,se;
 +    int found,cj_ind_0,cj_ind_1,cj_ind_m;
 +    int cj_m;
 +    gmx_bool Found_si;
 +    int si_ind;
 +    nbnxn_excl_t *nbl_excl;
 +    int inner_i,inner_e,w;
 +
 +    cell = nbs->cell;
 +
 +    na_c = nbl->na_ci;
 +
 +    if (nbl_sci->cj4_ind_end == nbl_sci->cj4_ind_start)
 +    {
 +        /* Empty list */
 +        return;
 +    }
 +
 +    sci = nbl_sci->sci;
 +
 +    cj_ind_first = nbl_sci->cj4_ind_start*NBNXN_GPU_JGROUP_SIZE;
 +    cj_ind_last  = nbl->work->cj_ind - 1;
 +
 +    cj_first = nbl->cj4[nbl_sci->cj4_ind_start].cj[0];
 +    cj_last  = nbl_cj(nbl,cj_ind_last);
 +
 +    /* Determine how many contiguous j-clusters we have starting
 +     * from the first i-cluster. This number can be used to directly
 +     * calculate j-cluster indices for excluded atoms.
 +     */
 +    ndirect = 0;
 +    while (cj_ind_first + ndirect <= cj_ind_last &&
 +           nbl_cj(nbl,cj_ind_first+ndirect) == sci*GPU_NSUBCELL + ndirect)
 +    {
 +        ndirect++;
 +    }
 +
 +    /* Loop over the atoms in the i super-cell */
 +    for(i=0; i<nbl->na_sc; i++)
 +    {
 +        ai = nbs->a[sci*nbl->na_sc+i];
 +        if (ai >= 0)
 +        {
 +            si  = (i>>na_c_2log);
 +
 +            /* Loop over the topology-based exclusions for this i-atom */
 +            for(eind=excl->index[ai]; eind<excl->index[ai+1]; eind++)
 +            {
 +                aj = excl->a[eind];
 +
 +                if (aj == ai)
 +                {
 +                    /* The self exclusion are already set, save some time */
 +                    continue;
 +                }
 +
 +                ge = cell[aj];
 +
 +                /* Without shifts we only calculate interactions j>i
 +                 * for one-way pair-lists.
 +                 */
 +                if (diagRemoved && ge <= sci*nbl->na_sc + i)
 +                {
 +                    continue;
 +                }
 +
 +                se = ge>>na_c_2log;
 +                /* Could the cluster se be in our list? */
 +                if (se >= cj_first && se <= cj_last)
 +                {
 +                    if (se < cj_first + ndirect)
 +                    {
 +                        /* We can calculate cj_ind directly from se */
 +                        found = cj_ind_first + se - cj_first;
 +                    }
 +                    else
 +                    {
 +                        /* Search for se using bisection */
 +                        found = -1;
 +                        cj_ind_0 = cj_ind_first + ndirect;
 +                        cj_ind_1 = cj_ind_last + 1;
 +                        while (found == -1 && cj_ind_0 < cj_ind_1)
 +                        {
 +                            cj_ind_m = (cj_ind_0 + cj_ind_1)>>1;
 +
 +                            cj_m = nbl_cj(nbl,cj_ind_m);
 +
 +                            if (se == cj_m)
 +                            {
 +                                found = cj_ind_m;
 +                            }
 +                            else if (se < cj_m)
 +                            {
 +                                cj_ind_1 = cj_ind_m;
 +                            }
 +                            else
 +                            {
 +                                cj_ind_0 = cj_ind_m + 1;
 +                            }
 +                        }
 +                    }
 +
 +                    if (found >= 0)
 +                    {
 +                        inner_i = i  - si*na_c;
 +                        inner_e = ge - se*na_c;
 +
 +/* Macro for getting the index of atom a within a cluster */
- #define A2CI(a)   ((a) >> NBNXN_CPU_CLUSTER_I_SIZE_2LOG)
++#define AMODCJ4(a)  ((a) & (NBNXN_GPU_JGROUP_SIZE - 1))
 +/* Macro for converting an atom number to a cluster number */
-                         if (nbl_imask0(nbl,found) & (1U << (AMODI(found)*GPU_NSUBCELL + si)))
++#define A2CJ4(a)    ((a) >> NBNXN_GPU_JGROUP_SIZE_2LOG)
++/* Macro for getting the index of an i-atom within a warp */
++#define AMODWI(a)   ((a) & (NBNXN_GPU_CLUSTER_SIZE/2 - 1))
 +
-                             get_nbl_exclusions_1(nbl,A2CI(found),w,&nbl_excl);
++                        if (nbl_imask0(nbl,found) & (1U << (AMODCJ4(found)*GPU_NSUBCELL + si)))
 +                        {
 +                            w       = (inner_e >> 2);
 +
-                             nbl_excl->pair[AMODI(inner_e)*nbl->na_ci+inner_i] &=
-                                 ~(1U << (AMODI(found)*GPU_NSUBCELL + si));
++                            get_nbl_exclusions_1(nbl,A2CJ4(found),w,&nbl_excl);
 +
- #undef AMODI
- #undef A2CI
++                            nbl_excl->pair[AMODWI(inner_e)*nbl->na_ci+inner_i] &=
++                                ~(1U << (AMODCJ4(found)*GPU_NSUBCELL + si));
 +                        }
 +
-         if (nbl->ci[nbl->nci].shift & NBNXN_CI_HALF_LJ(0))
++#undef AMODCJ4
++#undef A2CJ4
++#undef AMODWI
 +                    }
 +                }
 +            }
 +        }
 +    }
 +}
 +
 +/* Reallocate the simple ci list for at least n entries */
 +static void nb_realloc_ci(nbnxn_pairlist_t *nbl,int n)
 +{
 +    nbl->ci_nalloc = over_alloc_small(n);
 +    nbnxn_realloc_void((void **)&nbl->ci,
 +                       nbl->nci*sizeof(*nbl->ci),
 +                       nbl->ci_nalloc*sizeof(*nbl->ci),
 +                       nbl->alloc,nbl->free);
 +}
 +
 +/* Reallocate the super-cell sci list for at least n entries */
 +static void nb_realloc_sci(nbnxn_pairlist_t *nbl,int n)
 +{
 +    nbl->sci_nalloc = over_alloc_small(n);
 +    nbnxn_realloc_void((void **)&nbl->sci,
 +                       nbl->nsci*sizeof(*nbl->sci),
 +                       nbl->sci_nalloc*sizeof(*nbl->sci),
 +                       nbl->alloc,nbl->free);
 +}
 +
 +/* Make a new ci entry at index nbl->nci */
 +static void new_ci_entry(nbnxn_pairlist_t *nbl,int ci,int shift,int flags,
 +                         nbnxn_list_work_t *work)
 +{
 +    if (nbl->nci + 1 > nbl->ci_nalloc)
 +    {
 +        nb_realloc_ci(nbl,nbl->nci+1);
 +    }
 +    nbl->ci[nbl->nci].ci            = ci;
 +    nbl->ci[nbl->nci].shift         = shift;
 +    /* Store the interaction flags along with the shift */
 +    nbl->ci[nbl->nci].shift        |= flags;
 +    nbl->ci[nbl->nci].cj_ind_start  = nbl->ncj;
 +    nbl->ci[nbl->nci].cj_ind_end    = nbl->ncj;
 +}
 +
 +/* Make a new sci entry at index nbl->nsci */
 +static void new_sci_entry(nbnxn_pairlist_t *nbl,int sci,int shift,int flags,
 +                          nbnxn_list_work_t *work)
 +{
 +    if (nbl->nsci + 1 > nbl->sci_nalloc)
 +    {
 +        nb_realloc_sci(nbl,nbl->nsci+1);
 +    }
 +    nbl->sci[nbl->nsci].sci           = sci;
 +    nbl->sci[nbl->nsci].shift         = shift;
 +    nbl->sci[nbl->nsci].cj4_ind_start = nbl->ncj4;
 +    nbl->sci[nbl->nsci].cj4_ind_end   = nbl->ncj4;
 +}
 +
 +/* Sort the simple j-list cj on exclusions.
 + * Entries with exclusions will all be sorted to the beginning of the list.
 + */
 +static void sort_cj_excl(nbnxn_cj_t *cj,int ncj,
 +                         nbnxn_list_work_t *work)
 +{
 +    int jnew,j;
 +
 +    if (ncj > work->cj_nalloc)
 +    {
 +        work->cj_nalloc = over_alloc_large(ncj);
 +        srenew(work->cj,work->cj_nalloc);
 +    }
 +
 +    /* Make a list of the j-cells involving exclusions */
 +    jnew = 0;
 +    for(j=0; j<ncj; j++)
 +    {
 +        if (cj[j].excl != NBNXN_INT_MASK_ALL)
 +        {
 +            work->cj[jnew++] = cj[j];
 +        }
 +    }
 +    /* Check if there are exclusions at all or not just the first entry */
 +    if (!((jnew == 0) ||
 +          (jnew == 1 && cj[0].excl != NBNXN_INT_MASK_ALL)))
 +    {
 +        for(j=0; j<ncj; j++)
 +        {
 +            if (cj[j].excl == NBNXN_INT_MASK_ALL)
 +            {
 +                work->cj[jnew++] = cj[j];
 +            }
 +        }
 +        for(j=0; j<ncj; j++)
 +        {
 +            cj[j] = work->cj[j];
 +        }
 +    }
 +}
 +
 +/* Close this simple list i entry */
 +static void close_ci_entry_simple(nbnxn_pairlist_t *nbl)
 +{
 +    int jlen;
 +
 +    /* All content of the new ci entry have already been filled correctly,
 +     * we only need to increase the count here (for non empty lists).
 +     */
 +    jlen = nbl->ci[nbl->nci].cj_ind_end - nbl->ci[nbl->nci].cj_ind_start;
 +    if (jlen > 0)
 +    {
 +        sort_cj_excl(nbl->cj+nbl->ci[nbl->nci].cj_ind_start,jlen,nbl->work);
 +
-             nbl->work->ncj_hlj += jlen;
++        /* The counts below are used for non-bonded pair/flop counts
++         * and should therefore match the available kernel setups.
++         */
++        if (!(nbl->ci[nbl->nci].shift & NBNXN_CI_DO_COUL(0)))
 +        {
-         else if (!(nbl->ci[nbl->nci].shift & NBNXN_CI_DO_COUL(0)))
++            nbl->work->ncj_noq += jlen;
 +        }
-             nbl->work->ncj_noq += jlen;
++        else if ((nbl->ci[nbl->nci].shift & NBNXN_CI_HALF_LJ(0)) ||
++                 !(nbl->ci[nbl->nci].shift & NBNXN_CI_DO_LJ(0)))
 +        {
-         nbl->ncj4         = ((nbl->work->cj_ind + 4-1) >> 2);
++            nbl->work->ncj_hlj += jlen;
 +        }
 +
 +        nbl->nci++;
 +    }
 +}
 +
 +/* Split sci entry for load balancing on the GPU.
 + * As we only now the current count on our own thread,
 + * we will need to estimate the current total amount of i-entries.
 + * As the lists get concatenated later, this estimate depends
 + * both on nthread and our own thread index thread.
 + */
 +static void split_sci_entry(nbnxn_pairlist_t *nbl,
 +                            int nsp_max_av,gmx_bool progBal,int nc_bal,
 +                            int thread,int nthread)
 +{
 +    int nsci_est;
 +    int nsp_max;
 +    int cj4_start,cj4_end,j4len,cj4;
 +    int sci;
 +    int nsp,nsp_sci,nsp_cj4,nsp_cj4_e,nsp_cj4_p;
 +    int p;
 +
 +    /* Estimate the total numbers of ci's of the nblist combined
 +     * over all threads using the target number of ci's.
 +     */
 +    nsci_est = nc_bal*thread/nthread + nbl->nsci;
 +    if (progBal)
 +    {
 +        /* The first ci blocks should be larger, to avoid overhead.
 +         * The last ci blocks should be smaller, to improve load balancing.
 +         */
 +        nsp_max = max(1,
 +                      nsp_max_av*nc_bal*3/(2*(nsci_est - 1 + nc_bal)));
 +    }
 +    else
 +    {
 +        nsp_max = nsp_max_av;
 +    }
 +
 +    cj4_start = nbl->sci[nbl->nsci-1].cj4_ind_start;
 +    cj4_end   = nbl->sci[nbl->nsci-1].cj4_ind_end;
 +    j4len = cj4_end - cj4_start;
 +
 +    if (j4len > 1 && j4len*GPU_NSUBCELL*NBNXN_GPU_JGROUP_SIZE > nsp_max)
 +    {
 +        /* Remove the last ci entry and process the cj4's again */
 +        nbl->nsci -= 1;
 +
 +        sci        = nbl->nsci;
 +        cj4        = cj4_start;
 +        nsp        = 0;
 +        nsp_sci    = 0;
 +        nsp_cj4_e  = 0;
 +        nsp_cj4    = 0;
 +        while (cj4 < cj4_end)
 +        {
 +            nsp_cj4_p = nsp_cj4;
 +            nsp_cj4   = 0;
 +            for(p=0; p<GPU_NSUBCELL*NBNXN_GPU_JGROUP_SIZE; p++)
 +            {
 +                nsp_cj4 += (nbl->cj4[cj4].imei[0].imask >> p) & 1;
 +            }
 +            nsp += nsp_cj4;
 +
 +            if (nsp > nsp_max && nsp > nsp_cj4)
 +            {
 +                nbl->sci[sci].cj4_ind_end = cj4;
 +                sci++;
 +                nbl->nsci++;
 +                if (nbl->nsci+1 > nbl->sci_nalloc)
 +                {
 +                    nb_realloc_sci(nbl,nbl->nsci+1);
 +                }
 +                nbl->sci[sci].sci           = nbl->sci[nbl->nsci-1].sci;
 +                nbl->sci[sci].shift         = nbl->sci[nbl->nsci-1].shift;
 +                nbl->sci[sci].cj4_ind_start = cj4;
 +                nsp_sci   = nsp - nsp_cj4;
 +                nsp_cj4_e = nsp_cj4_p;
 +                nsp       = nsp_cj4;
 +            }
 +
 +            cj4++;
 +        }
 +
 +        /* Put the remaining cj4's in a new ci entry */
 +        nbl->sci[sci].cj4_ind_end = cj4_end;
 +
 +        /* Possibly balance out the last two ci's
 +         * by moving the last cj4 of the second last ci.
 +         */
 +        if (nsp_sci - nsp_cj4_e >= nsp + nsp_cj4_e)
 +        {
 +            nbl->sci[sci-1].cj4_ind_end--;
 +            nbl->sci[sci].cj4_ind_start--;
 +        }
 +
 +        sci++;
 +        nbl->nsci++;
 +    }
 +}
 +
 +/* Clost this super/sub list i entry */
 +static void close_ci_entry_supersub(nbnxn_pairlist_t *nbl,
 +                                    int nsp_max_av,
 +                                    gmx_bool progBal,int nc_bal,
 +                                    int thread,int nthread)
 +{
 +    int j4len,tlen;
 +    int nb,b;
 +
 +    /* All content of the new ci entry have already been filled correctly,
 +     * we only need to increase the count here (for non empty lists).
 +     */
 +    j4len = nbl->sci[nbl->nsci].cj4_ind_end - nbl->sci[nbl->nsci].cj4_ind_start;
 +    if (j4len > 0)
 +    {
 +        /* We can only have complete blocks of 4 j-entries in a list,
 +         * so round the count up before closing.
 +         */
-     ia = ci*(GPU_NSUBCELL>>STRIDE_8BB_2LOG)*NNBSBB_XXXX;
-     for(m=0; m<(GPU_NSUBCELL>>STRIDE_8BB_2LOG)*NNBSBB_XXXX; m+=NNBSBB_XXXX)
++        nbl->ncj4         = ((nbl->work->cj_ind + NBNXN_GPU_JGROUP_SIZE - 1) >> NBNXN_GPU_JGROUP_SIZE_2LOG);
 +        nbl->work->cj_ind = nbl->ncj4*NBNXN_GPU_JGROUP_SIZE;
 +
 +        nbl->nsci++;
 +
 +        if (nsp_max_av > 0)
 +        {
 +            split_sci_entry(nbl,nsp_max_av,progBal,nc_bal,thread,nthread);
 +        }
 +    }
 +}
 +
 +/* Syncs the working array before adding another grid pair to the list */
 +static void sync_work(nbnxn_pairlist_t *nbl)
 +{
 +    if (!nbl->bSimple)
 +    {
 +        nbl->work->cj_ind   = nbl->ncj4*NBNXN_GPU_JGROUP_SIZE;
 +        nbl->work->cj4_init = nbl->ncj4;
 +    }
 +}
 +
 +/* Clears an nbnxn_pairlist_t data structure */
 +static void clear_pairlist(nbnxn_pairlist_t *nbl)
 +{
 +    nbl->nci           = 0;
 +    nbl->nsci          = 0;
 +    nbl->ncj           = 0;
 +    nbl->ncj4          = 0;
 +    nbl->nci_tot       = 0;
 +    nbl->nexcl         = 1;
 +
 +    nbl->work->ncj_noq = 0;
 +    nbl->work->ncj_hlj = 0;
 +}
 +
 +/* Sets a simple list i-cell bounding box, including PBC shift */
 +static void set_icell_bb_simple(const float *bb,int ci,
 +                                real shx,real shy,real shz,
 +                                float *bb_ci)
 +{
 +    int ia;
 +
 +    ia = ci*NNBSBB_B;
 +    bb_ci[BBL_X] = bb[ia+BBL_X] + shx;
 +    bb_ci[BBL_Y] = bb[ia+BBL_Y] + shy;
 +    bb_ci[BBL_Z] = bb[ia+BBL_Z] + shz;
 +    bb_ci[BBU_X] = bb[ia+BBU_X] + shx;
 +    bb_ci[BBU_Y] = bb[ia+BBU_Y] + shy;
 +    bb_ci[BBU_Z] = bb[ia+BBU_Z] + shz;
 +}
 +
 +/* Sets a super-cell and sub cell bounding boxes, including PBC shift */
 +static void set_icell_bb_supersub(const float *bb,int ci,
 +                                  real shx,real shy,real shz,
 +                                  float *bb_ci)
 +{
 +    int ia,m,i;
 +
 +#ifdef NBNXN_BBXXXX
-         for(i=0; i<STRIDE_8BB; i++)
++    ia = ci*(GPU_NSUBCELL>>STRIDE_PBB_2LOG)*NNBSBB_XXXX;
++    for(m=0; m<(GPU_NSUBCELL>>STRIDE_PBB_2LOG)*NNBSBB_XXXX; m+=NNBSBB_XXXX)
 +    {
-             bb_ci[m+0*STRIDE_8BB+i] = bb[ia+m+0*STRIDE_8BB+i] + shx;
-             bb_ci[m+1*STRIDE_8BB+i] = bb[ia+m+1*STRIDE_8BB+i] + shy;
-             bb_ci[m+2*STRIDE_8BB+i] = bb[ia+m+2*STRIDE_8BB+i] + shz;
-             bb_ci[m+3*STRIDE_8BB+i] = bb[ia+m+3*STRIDE_8BB+i] + shx;
-             bb_ci[m+4*STRIDE_8BB+i] = bb[ia+m+4*STRIDE_8BB+i] + shy;
-             bb_ci[m+5*STRIDE_8BB+i] = bb[ia+m+5*STRIDE_8BB+i] + shz;
++        for(i=0; i<STRIDE_PBB; i++)
 +        {
- #ifdef NBNXN_SEARCH_SSE
++            bb_ci[m+0*STRIDE_PBB+i] = bb[ia+m+0*STRIDE_PBB+i] + shx;
++            bb_ci[m+1*STRIDE_PBB+i] = bb[ia+m+1*STRIDE_PBB+i] + shy;
++            bb_ci[m+2*STRIDE_PBB+i] = bb[ia+m+2*STRIDE_PBB+i] + shz;
++            bb_ci[m+3*STRIDE_PBB+i] = bb[ia+m+3*STRIDE_PBB+i] + shx;
++            bb_ci[m+4*STRIDE_PBB+i] = bb[ia+m+4*STRIDE_PBB+i] + shy;
++            bb_ci[m+5*STRIDE_PBB+i] = bb[ia+m+5*STRIDE_PBB+i] + shz;
 +        }
 +    }
 +#else
 +    ia = ci*GPU_NSUBCELL*NNBSBB_B;
 +    for(i=0; i<GPU_NSUBCELL*NNBSBB_B; i+=NNBSBB_B)
 +    {
 +        bb_ci[i+BBL_X] = bb[ia+i+BBL_X] + shx;
 +        bb_ci[i+BBL_Y] = bb[ia+i+BBL_Y] + shy;
 +        bb_ci[i+BBL_Z] = bb[ia+i+BBL_Z] + shz;
 +        bb_ci[i+BBU_X] = bb[ia+i+BBU_X] + shx;
 +        bb_ci[i+BBU_Y] = bb[ia+i+BBU_Y] + shy;
 +        bb_ci[i+BBU_Z] = bb[ia+i+BBU_Z] + shz;
 +    }
 +#endif
 +}
 +
 +/* Copies PBC shifted i-cell atom coordinates x,y,z to working array */
 +static void icell_set_x_simple(int ci,
 +                               real shx,real shy,real shz,
 +                               int na_c,
 +                               int stride,const real *x,
 +                               nbnxn_list_work_t *work)
 +{
 +    int  ia,i;
 +
 +    ia = ci*NBNXN_CPU_CLUSTER_I_SIZE;
 +
 +    for(i=0; i<NBNXN_CPU_CLUSTER_I_SIZE; i++)
 +    {
 +        work->x_ci[i*STRIDE_XYZ+XX] = x[(ia+i)*stride+XX] + shx;
 +        work->x_ci[i*STRIDE_XYZ+YY] = x[(ia+i)*stride+YY] + shy;
 +        work->x_ci[i*STRIDE_XYZ+ZZ] = x[(ia+i)*stride+ZZ] + shz;
 +    }
 +}
 +
 +/* Copies PBC shifted super-cell atom coordinates x,y,z to working array */
 +static void icell_set_x_supersub(int ci,
 +                                 real shx,real shy,real shz,
 +                                 int na_c,
 +                                 int stride,const real *x,
 +                                 nbnxn_list_work_t *work)
 +{
 +    int  ia,i;
 +    real *x_ci;
 +
 +    x_ci = work->x_ci;
 +
 +    ia = ci*GPU_NSUBCELL*na_c;
 +    for(i=0; i<GPU_NSUBCELL*na_c; i++)
 +    {
 +        x_ci[i*DIM + XX] = x[(ia+i)*stride + XX] + shx;
 +        x_ci[i*DIM + YY] = x[(ia+i)*stride + YY] + shy;
 +        x_ci[i*DIM + ZZ] = x[(ia+i)*stride + ZZ] + shz;
 +    }
 +}
 +
-         for(i=0; i<na_c; i+=STRIDE_8BB)
++#ifdef NBNXN_SEARCH_BB_SSE
 +/* Copies PBC shifted super-cell packed atom coordinates to working array */
 +static void icell_set_x_supersub_sse8(int ci,
 +                                      real shx,real shy,real shz,
 +                                      int na_c,
 +                                      int stride,const real *x,
 +                                      nbnxn_list_work_t *work)
 +{
 +    int  si,io,ia,i,j;
 +    real *x_ci;
 +
 +    x_ci = work->x_ci;
 +
 +    for(si=0; si<GPU_NSUBCELL; si++)
 +    {
-             for(j=0; j<STRIDE_8BB; j++)
++        for(i=0; i<na_c; i+=STRIDE_PBB)
 +        {
 +            io = si*na_c + i;
 +            ia = ci*GPU_NSUBCELL*na_c + io;
-                 x_ci[io*DIM + j + XX*STRIDE_8BB] = x[(ia+j)*stride+XX] + shx;
-                 x_ci[io*DIM + j + YY*STRIDE_8BB] = x[(ia+j)*stride+YY] + shy;
-                 x_ci[io*DIM + j + ZZ*STRIDE_8BB] = x[(ia+j)*stride+ZZ] + shz;
++            for(j=0; j<STRIDE_PBB; j++)
 +            {
-             for(j=0; j<4; j++)
++                x_ci[io*DIM + j + XX*STRIDE_PBB] = x[(ia+j)*stride+XX] + shx;
++                x_ci[io*DIM + j + YY*STRIDE_PBB] = x[(ia+j)*stride+YY] + shy;
++                x_ci[io*DIM + j + ZZ*STRIDE_PBB] = x[(ia+j)*stride+ZZ] + shz;
 +            }
 +        }
 +    }
 +}
 +#endif
 +
 +static real nbnxn_rlist_inc_nonloc_fac = 0.6;
 +
 +/* Due to the cluster size the effective pair-list is longer than
 + * that of a simple atom pair-list. This function gives the extra distance.
 + */
 +real nbnxn_get_rlist_effective_inc(int cluster_size,real atom_density)
 +{
 +    return ((0.5 + nbnxn_rlist_inc_nonloc_fac)*sqr(((cluster_size) - 1.0)/(cluster_size))*pow((cluster_size)/(atom_density),1.0/3.0));
 +}
 +
 +/* Estimates the interaction volume^2 for non-local interactions */
 +static real nonlocal_vol2(const gmx_domdec_zones_t *zones,rvec ls,real r)
 +{
 +    int  z,d;
 +    real cl,ca,za;
 +    real vold_est;
 +    real vol2_est_tot;
 +
 +    vol2_est_tot = 0;
 +
 +    /* Here we simply add up the volumes of 1, 2 or 3 1D decomposition
 +     * not home interaction volume^2. As these volumes are not additive,
 +     * this is an overestimate, but it would only be significant in the limit
 +     * of small cells, where we anyhow need to split the lists into
 +     * as small parts as possible.
 +     */
 +
 +    for(z=0; z<zones->n; z++)
 +    {
 +        if (zones->shift[z][XX] + zones->shift[z][YY] + zones->shift[z][ZZ] == 1)
 +        {
 +            cl = 0;
 +            ca = 1;
 +            za = 1;
 +            for(d=0; d<DIM; d++)
 +            {
 +                if (zones->shift[z][d] == 0)
 +                {
 +                    cl += 0.5*ls[d];
 +                    ca *= ls[d];
 +                    za *= zones->size[z].x1[d] - zones->size[z].x0[d];
 +                }
 +            }
 +
 +            /* 4 octants of a sphere */
 +            vold_est  = 0.25*M_PI*r*r*r*r;
 +            /* 4 quarter pie slices on the edges */
 +            vold_est += 4*cl*M_PI/6.0*r*r*r;
 +            /* One rectangular volume on a face */
 +            vold_est += ca*0.5*r*r;
 +
 +            vol2_est_tot += vold_est*za;
 +        }
 +    }
 +
 +    return vol2_est_tot;
 +}
 +
 +/* Estimates the average size of a full j-list for super/sub setup */
 +static int get_nsubpair_max(const nbnxn_search_t nbs,
 +                            int iloc,
 +                            real rlist,
 +                            int min_ci_balanced)
 +{
 +    const nbnxn_grid_t *grid;
 +    rvec ls;
 +    real xy_diag2,r_eff_sup,vol_est,nsp_est,nsp_est_nl;
 +    int  nsubpair_max;
 +
 +    grid = &nbs->grid[0];
 +
 +    ls[XX] = (grid->c1[XX] - grid->c0[XX])/(grid->ncx*GPU_NSUBCELL_X);
 +    ls[YY] = (grid->c1[YY] - grid->c0[YY])/(grid->ncy*GPU_NSUBCELL_Y);
 +    ls[ZZ] = (grid->c1[ZZ] - grid->c0[ZZ])*grid->ncx*grid->ncy/(grid->nc*GPU_NSUBCELL_Z);
 +
 +    /* The average squared length of the diagonal of a sub cell */
 +    xy_diag2 = ls[XX]*ls[XX] + ls[YY]*ls[YY] + ls[ZZ]*ls[ZZ];
 +
 +    /* The formulas below are a heuristic estimate of the average nsj per si*/
 +    r_eff_sup = rlist + nbnxn_rlist_inc_nonloc_fac*sqr((grid->na_c - 1.0)/grid->na_c)*sqrt(xy_diag2/3);
 +
 +    if (!nbs->DomDec || nbs->zones->n == 1)
 +    {
 +        nsp_est_nl = 0;
 +    }
 +    else
 +    {
 +        nsp_est_nl =
 +            sqr(grid->atom_density/grid->na_c)*
 +            nonlocal_vol2(nbs->zones,ls,r_eff_sup);
 +    }
 +
 +    if (LOCAL_I(iloc))
 +    {
 +        /* Sub-cell interacts with itself */
 +        vol_est  = ls[XX]*ls[YY]*ls[ZZ];
 +        /* 6/2 rectangular volume on the faces */
 +        vol_est += (ls[XX]*ls[YY] + ls[XX]*ls[ZZ] + ls[YY]*ls[ZZ])*r_eff_sup;
 +        /* 12/2 quarter pie slices on the edges */
 +        vol_est += 2*(ls[XX] + ls[YY] + ls[ZZ])*0.25*M_PI*sqr(r_eff_sup);
 +        /* 4 octants of a sphere */
 +        vol_est += 0.5*4.0/3.0*M_PI*pow(r_eff_sup,3);
 +
 +        nsp_est = grid->nsubc_tot*vol_est*grid->atom_density/grid->na_c;
 +
 +        /* Subtract the non-local pair count */
 +        nsp_est -= nsp_est_nl;
 +
 +        if (debug)
 +        {
 +            fprintf(debug,"nsp_est local %5.1f non-local %5.1f\n",
 +                    nsp_est,nsp_est_nl);
 +        }
 +    }
 +    else
 +    {
 +        nsp_est = nsp_est_nl;
 +    }
 +
 +    if (min_ci_balanced <= 0 || grid->nc >= min_ci_balanced || grid->nc == 0)
 +    {
 +        /* We don't need to worry */
 +        nsubpair_max = -1;
 +    }
 +    else
 +    {
 +        /* Thus the (average) maximum j-list size should be as follows */
 +        nsubpair_max = max(1,(int)(nsp_est/min_ci_balanced+0.5));
 +
 +        /* Since the target value is a maximum (this avoid high outliers,
 +         * which lead to load imbalance), not average, we get more lists
 +         * than we ask for (to compensate we need to add GPU_NSUBCELL*4/4).
 +         * But more importantly, the optimal GPU performance moves
 +         * to lower number of block for very small blocks.
 +         * To compensate we add the maximum pair count per cj4.
 +         */
 +        nsubpair_max += GPU_NSUBCELL*NBNXN_CPU_CLUSTER_I_SIZE;
 +    }
 +
 +    if (debug)
 +    {
 +        fprintf(debug,"nbl nsp estimate %.1f, nsubpair_max %d\n",
 +                nsp_est,nsubpair_max);
 +    }
 +
 +    return nsubpair_max;
 +}
 +
 +/* Debug list print function */
 +static void print_nblist_ci_cj(FILE *fp,const nbnxn_pairlist_t *nbl)
 +{
 +    int i,j;
 +
 +    for(i=0; i<nbl->nci; i++)
 +    {
 +        fprintf(fp,"ci %4d  shift %2d  ncj %3d\n",
 +                nbl->ci[i].ci,nbl->ci[i].shift,
 +                nbl->ci[i].cj_ind_end - nbl->ci[i].cj_ind_start);
 +
 +        for(j=nbl->ci[i].cj_ind_start; j<nbl->ci[i].cj_ind_end; j++)
 +        {
 +            fprintf(fp,"  cj %5d  imask %x\n",
 +                    nbl->cj[j].cj,
 +                    nbl->cj[j].excl);
 +        }
 +    }
 +}
 +
 +/* Debug list print function */
 +static void print_nblist_sci_cj(FILE *fp,const nbnxn_pairlist_t *nbl)
 +{
 +    int i,j4,j;
 +
 +    for(i=0; i<nbl->nsci; i++)
 +    {
 +        fprintf(fp,"ci %4d  shift %2d  ncj4 %2d\n",
 +                nbl->sci[i].sci,nbl->sci[i].shift,
 +                nbl->sci[i].cj4_ind_end - nbl->sci[i].cj4_ind_start);
 +
 +        for(j4=nbl->sci[i].cj4_ind_start; j4<nbl->sci[i].cj4_ind_end; j4++)
 +        {
- #ifdef NBNXN_SEARCH_SSE
++            for(j=0; j<NBNXN_GPU_JGROUP_SIZE; j++)
 +            {
 +                fprintf(fp,"  sj %5d  imask %x\n",
 +                        nbl->cj4[j4].cj[j],
 +                        nbl->cj4[j4].imei[0].imask);
 +            }
 +        }
 +    }
 +}
 +
 +/* Combine pair lists *nbl generated on multiple threads nblc */
 +static void combine_nblists(int nnbl,nbnxn_pairlist_t **nbl,
 +                            nbnxn_pairlist_t *nblc)
 +{
 +    int nsci,ncj4,nexcl;
 +    int n,i;
 +
 +    if (nblc->bSimple)
 +    {
 +        gmx_incons("combine_nblists does not support simple lists");
 +    }
 +
 +    nsci  = nblc->nsci;
 +    ncj4  = nblc->ncj4;
 +    nexcl = nblc->nexcl;
 +    for(i=0; i<nnbl; i++)
 +    {
 +        nsci  += nbl[i]->nsci;
 +        ncj4  += nbl[i]->ncj4;
 +        nexcl += nbl[i]->nexcl;
 +    }
 +
 +    if (nsci > nblc->sci_nalloc)
 +    {
 +        nb_realloc_sci(nblc,nsci);
 +    }
 +    if (ncj4 > nblc->cj4_nalloc)
 +    {
 +        nblc->cj4_nalloc = over_alloc_small(ncj4);
 +        nbnxn_realloc_void((void **)&nblc->cj4,
 +                           nblc->ncj4*sizeof(*nblc->cj4),
 +                           nblc->cj4_nalloc*sizeof(*nblc->cj4),
 +                           nblc->alloc,nblc->free);
 +    }
 +    if (nexcl > nblc->excl_nalloc)
 +    {
 +        nblc->excl_nalloc = over_alloc_small(nexcl);
 +        nbnxn_realloc_void((void **)&nblc->excl,
 +                           nblc->nexcl*sizeof(*nblc->excl),
 +                           nblc->excl_nalloc*sizeof(*nblc->excl),
 +                           nblc->alloc,nblc->free);
 +    }
 +
 +    /* Each thread should copy its own data to the combined arrays,
 +     * as otherwise data will go back and forth between different caches.
 +     */
 +#pragma omp parallel for num_threads(gmx_omp_nthreads_get(emntPairsearch)) schedule(static)
 +    for(n=0; n<nnbl; n++)
 +    {
 +        int sci_offset;
 +        int cj4_offset;
 +        int ci_offset;
 +        int excl_offset;
 +        int i,j4;
 +        const nbnxn_pairlist_t *nbli;
 +
 +        /* Determine the offset in the combined data for our thread */
 +        sci_offset  = nblc->nsci;
 +        cj4_offset  = nblc->ncj4;
 +        ci_offset   = nblc->nci_tot;
 +        excl_offset = nblc->nexcl;
 +
 +        for(i=0; i<n; i++)
 +        {
 +            sci_offset  += nbl[i]->nsci;
 +            cj4_offset  += nbl[i]->ncj4;
 +            ci_offset   += nbl[i]->nci_tot;
 +            excl_offset += nbl[i]->nexcl;
 +        }
 +
 +        nbli = nbl[n];
 +
 +        for(i=0; i<nbli->nsci; i++)
 +        {
 +            nblc->sci[sci_offset+i]                = nbli->sci[i];
 +            nblc->sci[sci_offset+i].cj4_ind_start += cj4_offset;
 +            nblc->sci[sci_offset+i].cj4_ind_end   += cj4_offset;
 +        }
 +
 +        for(j4=0; j4<nbli->ncj4; j4++)
 +        {
 +            nblc->cj4[cj4_offset+j4] = nbli->cj4[j4];
 +            nblc->cj4[cj4_offset+j4].imei[0].excl_ind += excl_offset;
 +            nblc->cj4[cj4_offset+j4].imei[1].excl_ind += excl_offset;
 +        }
 +
 +        for(j4=0; j4<nbli->nexcl; j4++)
 +        {
 +            nblc->excl[excl_offset+j4] = nbli->excl[j4];
 +        }
 +    }
 +
 +    for(n=0; n<nnbl; n++)
 +    {
 +        nblc->nsci    += nbl[n]->nsci;
 +        nblc->ncj4    += nbl[n]->ncj4;
 +        nblc->nci_tot += nbl[n]->nci_tot;
 +        nblc->nexcl   += nbl[n]->nexcl;
 +    }
 +}
 +
 +/* Returns the next ci to be processes by our thread */
 +static gmx_bool next_ci(const nbnxn_grid_t *grid,
 +                        int conv,
 +                        int nth,int ci_block,
 +                        int *ci_x,int *ci_y,
 +                        int *ci_b,int *ci)
 +{
 +    (*ci_b)++;
 +    (*ci)++;
 +
 +    if (*ci_b == ci_block)
 +    {
 +        /* Jump to the next block assigned to this task */
 +        *ci   += (nth - 1)*ci_block;
 +        *ci_b  = 0;
 +    }
 +
 +    if (*ci >= grid->nc*conv)
 +    {
 +        return FALSE;
 +    }
 +
 +    while (*ci >= grid->cxy_ind[*ci_x*grid->ncy + *ci_y + 1]*conv)
 +    {
 +        *ci_y += 1;
 +        if (*ci_y == grid->ncy)
 +        {
 +            *ci_x += 1;
 +            *ci_y  = 0;
 +        }
 +    }
 +
 +    return TRUE;
 +}
 +
 +/* Returns the distance^2 for which we put cell pairs in the list
 + * without checking atom pair distances. This is usually < rlist^2.
 + */
 +static float boundingbox_only_distance2(const nbnxn_grid_t *gridi,
 +                                        const nbnxn_grid_t *gridj,
 +                                        real rlist,
 +                                        gmx_bool simple)
 +{
 +    /* If the distance between two sub-cell bounding boxes is less
 +     * than this distance, do not check the distance between
 +     * all particle pairs in the sub-cell, since then it is likely
 +     * that the box pair has atom pairs within the cut-off.
 +     * We use the nblist cut-off minus 0.5 times the average x/y diagonal
 +     * spacing of the sub-cells. Around 40% of the checked pairs are pruned.
 +     * Using more than 0.5 gains at most 0.5%.
 +     * If forces are calculated more than twice, the performance gain
 +     * in the force calculation outweighs the cost of checking.
 +     * Note that with subcell lists, the atom-pair distance check
 +     * is only performed when only 1 out of 8 sub-cells in within range,
 +     * this is because the GPU is much faster than the cpu.
 +     */
 +    real bbx,bby;
 +    real rbb2;
 +
 +    bbx = 0.5*(gridi->sx + gridj->sx);
 +    bby = 0.5*(gridi->sy + gridj->sy);
 +    if (!simple)
 +    {
 +        bbx /= GPU_NSUBCELL_X;
 +        bby /= GPU_NSUBCELL_Y;
 +    }
 +
 +    rbb2 = sqr(max(0,rlist - 0.5*sqrt(bbx*bbx + bby*bby)));
 +
 +#ifndef GMX_DOUBLE
 +    return rbb2;
 +#else
 +    return (float)((1+GMX_FLOAT_EPS)*rbb2);
 +#endif
 +}
 +
 +static int get_ci_block_size(const nbnxn_grid_t *gridi,
 +                             gmx_bool bDomDec, int nth)
 +{
 +    const int ci_block_enum = 5;
 +    const int ci_block_denom = 11;
 +    const int ci_block_min_atoms = 16;
 +    int ci_block;
 +
 +    /* Here we decide how to distribute the blocks over the threads.
 +     * We use prime numbers to try to avoid that the grid size becomes
 +     * a multiple of the number of threads, which would lead to some
 +     * threads getting "inner" pairs and others getting boundary pairs,
 +     * which in turns will lead to load imbalance between threads.
 +     * Set the block size as 5/11/ntask times the average number of cells
 +     * in a y,z slab. This should ensure a quite uniform distribution
 +     * of the grid parts of the different thread along all three grid
 +     * zone boundaries with 3D domain decomposition. At the same time
 +     * the blocks will not become too small.
 +     */
 +    ci_block = (gridi->nc*ci_block_enum)/(ci_block_denom*gridi->ncx*nth);
 +
 +    /* Ensure the blocks are not too small: avoids cache invalidation */
 +    if (ci_block*gridi->na_sc < ci_block_min_atoms)
 +    {
 +        ci_block = (ci_block_min_atoms + gridi->na_sc - 1)/gridi->na_sc;
 +    }
 +    
 +    /* Without domain decomposition
 +     * or with less than 3 blocks per task, divide in nth blocks.
 +     */
 +    if (!bDomDec || ci_block*3*nth > gridi->nc)
 +    {
 +        ci_block = (gridi->nc + nth - 1)/nth;
 +    }
 +
 +    return ci_block;
 +}
 +
 +/* Generates the part of pair-list nbl assigned to our thread */
 +static void nbnxn_make_pairlist_part(const nbnxn_search_t nbs,
 +                                     const nbnxn_grid_t *gridi,
 +                                     const nbnxn_grid_t *gridj,
 +                                     nbnxn_search_work_t *work,
 +                                     const nbnxn_atomdata_t *nbat,
 +                                     const t_blocka *excl,
 +                                     real rlist,
 +                                     int nb_kernel_type,
 +                                     int ci_block,
 +                                     gmx_bool bFBufferFlag,
 +                                     int nsubpair_max,
 +                                     gmx_bool progBal,
 +                                     int min_ci_balanced,
 +                                     int th,int nth,
 +                                     nbnxn_pairlist_t *nbl)
 +{
 +    int  na_cj_2log;
 +    matrix box;
 +    real rl2;
 +    float rbb2;
 +    int  d;
 +    int  ci_b,ci,ci_x,ci_y,ci_xy,cj;
 +    ivec shp;
 +    int  tx,ty,tz;
 +    int  shift;
 +    gmx_bool bMakeList;
 +    real shx,shy,shz;
 +    int  conv_i,cell0_i;
 +    const float *bb_i,*bbcz_i,*bbcz_j;
 +    const int *flags_i;
 +    real bx0,bx1,by0,by1,bz0,bz1;
 +    real bz1_frac;
 +    real d2cx,d2z,d2z_cx,d2z_cy,d2zx,d2zxy,d2xy;
 +    int  cxf,cxl,cyf,cyf_x,cyl;
 +    int  cx,cy;
 +    int  c0,c1,cs,cf,cl;
 +    int  ndistc;
 +    int  ncpcheck;
 +    int  gridi_flag_shift=0,gridj_flag_shift=0;
 +    unsigned *gridj_flag=NULL;
 +    int  ncj_old_i,ncj_old_j;
 +
 +    nbs_cycle_start(&work->cc[enbsCCsearch]);
 +
 +    if (gridj->bSimple != nbl->bSimple)
 +    {
 +        gmx_incons("Grid incompatible with pair-list");
 +    }
 +
 +    sync_work(nbl);
 +    nbl->na_sc = gridj->na_sc;
 +    nbl->na_ci = gridj->na_c;
 +    nbl->na_cj = nbnxn_kernel_to_cj_size(nb_kernel_type);
 +    na_cj_2log = get_2log(nbl->na_cj);
 +
 +    nbl->rlist  = rlist;
 +
 +    if (bFBufferFlag)
 +    {
 +        /* Determine conversion of clusters to flag blocks */
 +        gridi_flag_shift = 0;
 +        while ((nbl->na_ci<<gridi_flag_shift) < NBNXN_BUFFERFLAG_SIZE)
 +        {
 +            gridi_flag_shift++;
 +        }
 +        gridj_flag_shift = 0;
 +        while ((nbl->na_cj<<gridj_flag_shift) < NBNXN_BUFFERFLAG_SIZE)
 +        {
 +            gridj_flag_shift++;
 +        }
 +
 +        gridj_flag = work->buffer_flags.flag;
 +    }
 +
 +    copy_mat(nbs->box,box);
 +
 +    rl2 = nbl->rlist*nbl->rlist;
 +
 +    rbb2 = boundingbox_only_distance2(gridi,gridj,nbl->rlist,nbl->bSimple);
 +
 +    if (debug)
 +    {
 +        fprintf(debug,"nbl bounding box only distance %f\n",sqrt(rbb2));
 +    }
 +
 +    /* Set the shift range */
 +    for(d=0; d<DIM; d++)
 +    {
 +        /* Check if we need periodicity shifts.
 +         * Without PBC or with domain decomposition we don't need them.
 +         */
 +        if (d >= ePBC2npbcdim(nbs->ePBC) || nbs->dd_dim[d])
 +        {
 +            shp[d] = 0;
 +        }
 +        else
 +        {
 +            if (d == XX &&
 +                box[XX][XX] - fabs(box[YY][XX]) - fabs(box[ZZ][XX]) < sqrt(rl2))
 +            {
 +                shp[d] = 2;
 +            }
 +            else
 +            {
 +                shp[d] = 1;
 +            }
 +        }
 +    }
 +
 +    if (nbl->bSimple && !gridi->bSimple)
 +    {
 +        conv_i  = gridi->na_sc/gridj->na_sc;
 +        bb_i    = gridi->bb_simple;
 +        bbcz_i  = gridi->bbcz_simple;
 +        flags_i = gridi->flags_simple;
 +    }
 +    else
 +    {
 +        conv_i  = 1;
 +        bb_i    = gridi->bb;
 +        bbcz_i  = gridi->bbcz;
 +        flags_i = gridi->flags;
 +    }
 +    cell0_i = gridi->cell0*conv_i;
 +
 +    bbcz_j = gridj->bbcz;
 +
 +    if (conv_i != 1)
 +    {
 +        /* Blocks of the conversion factor - 1 give a large repeat count
 +         * combined with a small block size. This should result in good
 +         * load balancing for both small and large domains.
 +         */
 +        ci_block = conv_i - 1;
 +    }
 +    if (debug)
 +    {
 +        fprintf(debug,"nbl nc_i %d col.av. %.1f ci_block %d\n",
 +                gridi->nc,gridi->nc/(double)(gridi->ncx*gridi->ncy),ci_block);
 +    }
 +
 +    ndistc = 0;
 +    ncpcheck = 0;
 +
 +    /* Initially ci_b and ci to 1 before where we want them to start,
 +     * as they will both be incremented in next_ci.
 +     */
 +    ci_b = -1;
 +    ci   = th*ci_block - 1;
 +    ci_x = 0;
 +    ci_y = 0;
 +    while (next_ci(gridi,conv_i,nth,ci_block,&ci_x,&ci_y,&ci_b,&ci))
 +    {
 +        if (nbl->bSimple && flags_i[ci] == 0)
 +        {
 +            continue;
 +        }
 +
 +        ncj_old_i = nbl->ncj;
 +
 +        d2cx = 0;
 +        if (gridj != gridi && shp[XX] == 0)
 +        {
 +            if (nbl->bSimple)
 +            {
 +                bx1 = bb_i[ci*NNBSBB_B+NNBSBB_C+XX];
 +            }
 +            else
 +            {
 +                bx1 = gridi->c0[XX] + (ci_x+1)*gridi->sx;
 +            }
 +            if (bx1 < gridj->c0[XX])
 +            {
 +                d2cx = sqr(gridj->c0[XX] - bx1);
 +
 +                if (d2cx >= rl2)
 +                {
 +                    continue;
 +                }
 +            }
 +        }
 +
 +        ci_xy = ci_x*gridi->ncy + ci_y;
 +
 +        /* Loop over shift vectors in three dimensions */
 +        for (tz=-shp[ZZ]; tz<=shp[ZZ]; tz++)
 +        {
 +            shz = tz*box[ZZ][ZZ];
 +
 +            bz0 = bbcz_i[ci*NNBSBB_D  ] + shz;
 +            bz1 = bbcz_i[ci*NNBSBB_D+1] + shz;
 +
 +            if (tz == 0)
 +            {
 +                d2z = 0;
 +            }
 +            else if (tz < 0)
 +            {
 +                d2z = sqr(bz1);
 +            }
 +            else
 +            {
 +                d2z = sqr(bz0 - box[ZZ][ZZ]);
 +            }
 +
 +            d2z_cx = d2z + d2cx;
 +
 +            if (d2z_cx >= rl2)
 +            {
 +                continue;
 +            }
 +
 +            bz1_frac =
 +                bz1/((real)(gridi->cxy_ind[ci_xy+1] - gridi->cxy_ind[ci_xy]));
 +            if (bz1_frac < 0)
 +            {
 +                bz1_frac = 0;
 +            }
 +            /* The check with bz1_frac close to or larger than 1 comes later */
 +
 +            for (ty=-shp[YY]; ty<=shp[YY]; ty++)
 +            {
 +                shy = ty*box[YY][YY] + tz*box[ZZ][YY];
 +
 +                if (nbl->bSimple)
 +                {
 +                    by0 = bb_i[ci*NNBSBB_B         +YY] + shy;
 +                    by1 = bb_i[ci*NNBSBB_B+NNBSBB_C+YY] + shy;
 +                }
 +                else
 +                {
 +                    by0 = gridi->c0[YY] + (ci_y  )*gridi->sy + shy;
 +                    by1 = gridi->c0[YY] + (ci_y+1)*gridi->sy + shy;
 +                }
 +
 +                get_cell_range(by0,by1,
 +                               gridj->ncy,gridj->c0[YY],gridj->sy,gridj->inv_sy,
 +                               d2z_cx,rl2,
 +                               &cyf,&cyl);
 +
 +                if (cyf > cyl)
 +                {
 +                    continue;
 +                }
 +
 +                d2z_cy = d2z;
 +                if (by1 < gridj->c0[YY])
 +                {
 +                    d2z_cy += sqr(gridj->c0[YY] - by1);
 +                }
 +                else if (by0 > gridj->c1[YY])
 +                {
 +                    d2z_cy += sqr(by0 - gridj->c1[YY]);
 +                }
 +
 +                for (tx=-shp[XX]; tx<=shp[XX]; tx++)
 +                {
 +                    shift = XYZ2IS(tx,ty,tz);
 +
 +#ifdef NBNXN_SHIFT_BACKWARD
 +                    if (gridi == gridj && shift > CENTRAL)
 +                    {
 +                        continue;
 +                    }
 +#endif
 +
 +                    shx = tx*box[XX][XX] + ty*box[YY][XX] + tz*box[ZZ][XX];
 +
 +                    if (nbl->bSimple)
 +                    {
 +                        bx0 = bb_i[ci*NNBSBB_B         +XX] + shx;
 +                        bx1 = bb_i[ci*NNBSBB_B+NNBSBB_C+XX] + shx;
 +                    }
 +                    else
 +                    {
 +                        bx0 = gridi->c0[XX] + (ci_x  )*gridi->sx + shx;
 +                        bx1 = gridi->c0[XX] + (ci_x+1)*gridi->sx + shx;
 +                    }
 +
 +                    get_cell_range(bx0,bx1,
 +                                   gridj->ncx,gridj->c0[XX],gridj->sx,gridj->inv_sx,
 +                                   d2z_cy,rl2,
 +                                   &cxf,&cxl);
 +
 +                    if (cxf > cxl)
 +                    {
 +                        continue;
 +                    }
 +
 +                    if (nbl->bSimple)
 +                    {
 +                        new_ci_entry(nbl,cell0_i+ci,shift,flags_i[ci],
 +                                     nbl->work);
 +                    }
 +                    else
 +                    {
 +                        new_sci_entry(nbl,cell0_i+ci,shift,flags_i[ci],
 +                                      nbl->work);
 +                    }
 +
 +#ifndef NBNXN_SHIFT_BACKWARD
 +                    if (cxf < ci_x)
 +#else
 +                    if (shift == CENTRAL && gridi == gridj &&
 +                        cxf < ci_x)
 +#endif
 +                    {
 +                        /* Leave the pairs with i > j.
 +                         * x is the major index, so skip half of it.
 +                         */
 +                        cxf = ci_x;
 +                    }
 +
 +                    if (nbl->bSimple)
 +                    {
 +                        set_icell_bb_simple(bb_i,ci,shx,shy,shz,
 +                                            nbl->work->bb_ci);
 +                    }
 +                    else
 +                    {
 +                        set_icell_bb_supersub(bb_i,ci,shx,shy,shz,
 +                                              nbl->work->bb_ci);
 +                    }
 +
 +                    nbs->icell_set_x(cell0_i+ci,shx,shy,shz,
 +                                     gridi->na_c,nbat->xstride,nbat->x,
 +                                     nbl->work);
 +
 +                    for(cx=cxf; cx<=cxl; cx++)
 +                    {
 +                        d2zx = d2z;
 +                        if (gridj->c0[XX] + cx*gridj->sx > bx1)
 +                        {
 +                            d2zx += sqr(gridj->c0[XX] + cx*gridj->sx - bx1);
 +                        }
 +                        else if (gridj->c0[XX] + (cx+1)*gridj->sx < bx0)
 +                        {
 +                            d2zx += sqr(gridj->c0[XX] + (cx+1)*gridj->sx - bx0);
 +                        }
 +
 +#ifndef NBNXN_SHIFT_BACKWARD
 +                        if (gridi == gridj &&
 +                            cx == 0 && cyf < ci_y)
 +#else
 +                        if (gridi == gridj &&
 +                            cx == 0 && shift == CENTRAL && cyf < ci_y)
 +#endif
 +                        {
 +                            /* Leave the pairs with i > j.
 +                             * Skip half of y when i and j have the same x.
 +                             */
 +                            cyf_x = ci_y;
 +                        }
 +                        else
 +                        {
 +                            cyf_x = cyf;
 +                        }
 +
 +                        for(cy=cyf_x; cy<=cyl; cy++)
 +                        {
 +                            c0 = gridj->cxy_ind[cx*gridj->ncy+cy];
 +                            c1 = gridj->cxy_ind[cx*gridj->ncy+cy+1];
 +#ifdef NBNXN_SHIFT_BACKWARD
 +                            if (gridi == gridj &&
 +                                shift == CENTRAL && c0 < ci)
 +                            {
 +                                c0 = ci;
 +                            }
 +#endif
 +
 +                            d2zxy = d2zx;
 +                            if (gridj->c0[YY] + cy*gridj->sy > by1)
 +                            {
 +                                d2zxy += sqr(gridj->c0[YY] + cy*gridj->sy - by1);
 +                            }
 +                            else if (gridj->c0[YY] + (cy+1)*gridj->sy < by0)
 +                            {
 +                                d2zxy += sqr(gridj->c0[YY] + (cy+1)*gridj->sy - by0);
 +                            }
 +                            if (c1 > c0 && d2zxy < rl2)
 +                            {
 +                                cs = c0 + (int)(bz1_frac*(c1 - c0));
 +                                if (cs >= c1)
 +                                {
 +                                    cs = c1 - 1;
 +                                }
 +
 +                                d2xy = d2zxy - d2z;
 +
 +                                /* Find the lowest cell that can possibly
 +                                 * be within range.
 +                                 */
 +                                cf = cs;
 +                                while(cf > c0 &&
 +                                      (bbcz_j[cf*NNBSBB_D+1] >= bz0 ||
 +                                       d2xy + sqr(bbcz_j[cf*NNBSBB_D+1] - bz0) < rl2))
 +                                {
 +                                    cf--;
 +                                }
 +
 +                                /* Find the highest cell that can possibly
 +                                 * be within range.
 +                                 */
 +                                cl = cs;
 +                                while(cl < c1-1 &&
 +                                      (bbcz_j[cl*NNBSBB_D] <= bz1 ||
 +                                       d2xy + sqr(bbcz_j[cl*NNBSBB_D] - bz1) < rl2))
 +                                {
 +                                    cl++;
 +                                }
 +
 +#ifdef NBNXN_REFCODE
 +                                {
 +                                    /* Simple reference code, for debugging,
 +                                     * overrides the more complex code above.
 +                                     */
 +                                    int k;
 +                                    cf = c1;
 +                                    cl = -1;
 +                                    for(k=c0; k<c1; k++)
 +                                    {
 +                                        if (box_dist2(bx0,bx1,by0,by1,bz0,bz1,
 +                                                      bb+k*NNBSBB_B) < rl2 &&
 +                                            k < cf)
 +                                        {
 +                                            cf = k;
 +                                        }
 +                                        if (box_dist2(bx0,bx1,by0,by1,bz0,bz1,
 +                                                      bb+k*NNBSBB_B) < rl2 &&
 +                                            k > cl)
 +                                        {
 +                                            cl = k;
 +                                        }
 +                                    }
 +                                }
 +#endif
 +
 +                                if (gridi == gridj)
 +                                {
 +                                    /* We want each atom/cell pair only once,
 +                                     * only use cj >= ci.
 +                                     */
 +#ifndef NBNXN_SHIFT_BACKWARD
 +                                    cf = max(cf,ci);
 +#else
 +                                    if (shift == CENTRAL)
 +                                    {
 +                                        cf = max(cf,ci);
 +                                    }
 +#endif
 +                                }
 +
 +                                if (cf <= cl)
 +                                {
 +                                    /* For f buffer flags with simple lists */
 +                                    ncj_old_j = nbl->ncj;
 +
 +                                    switch (nb_kernel_type)
 +                                    {
 +                                    case nbnxnk4x4_PlainC:
 +                                        check_subcell_list_space_simple(nbl,cl-cf+1);
 +
 +                                        make_cluster_list_simple(gridj,
 +                                                                 nbl,ci,cf,cl,
 +                                                                 (gridi == gridj && shift == CENTRAL),
 +                                                                 nbat->x,
 +                                                                 rl2,rbb2,
 +                                                                 &ndistc);
 +                                        break;
 +#ifdef GMX_NBNXN_SIMD_4XN
 +                                    case nbnxnk4xN_SIMD_4xN:
 +                                        check_subcell_list_space_simple(nbl,ci_to_cj(na_cj_2log,cl-cf)+2);
 +                                        make_cluster_list_simd_4xn(gridj,
 +                                                                   nbl,ci,cf,cl,
 +                                                                   (gridi == gridj && shift == CENTRAL),
 +                                                                   nbat->x,
 +                                                                   rl2,rbb2,
 +                                                                   &ndistc);
 +                                        break;
 +#endif
 +#ifdef GMX_NBNXN_SIMD_2XNN
 +                                    case nbnxnk4xN_SIMD_2xNN:
 +                                        check_subcell_list_space_simple(nbl,ci_to_cj(na_cj_2log,cl-cf)+2);
 +                                        make_cluster_list_simd_2xnn(gridj,
 +                                                                   nbl,ci,cf,cl,
 +                                                                   (gridi == gridj && shift == CENTRAL),
 +                                                                   nbat->x,
 +                                                                   rl2,rbb2,
 +                                                                   &ndistc);
 +                                        break;
 +#endif
 +                                    case nbnxnk8x8x8_PlainC:
 +                                    case nbnxnk8x8x8_CUDA:
 +                                        check_subcell_list_space_supersub(nbl,cl-cf+1);
 +                                        for(cj=cf; cj<=cl; cj++)
 +                                        {
 +                                            make_cluster_list_supersub(nbs,gridi,gridj,
 +                                                                       nbl,ci,cj,
 +                                                                       (gridi == gridj && shift == CENTRAL && ci == cj),
 +                                                                       nbat->xstride,nbat->x,
 +                                                                       rl2,rbb2,
 +                                                                       &ndistc);
 +                                        }
 +                                        break;
 +                                    }
 +                                    ncpcheck += cl - cf + 1;
 +
 +                                    if (bFBufferFlag && nbl->ncj > ncj_old_j)
 +                                    {
 +                                        int cbf,cbl,cb;
 +
 +                                        cbf = nbl->cj[ncj_old_j].cj >> gridj_flag_shift;
 +                                        cbl = nbl->cj[nbl->ncj-1].cj >> gridj_flag_shift;
 +                                        for(cb=cbf; cb<=cbl; cb++)
 +                                        {
 +                                            gridj_flag[cb] = 1U<<th;
 +                                        }
 +                                    }
 +                                }
 +                            }
 +                        }
 +                    }
 +
 +                    /* Set the exclusions for this ci list */
 +                    if (nbl->bSimple)
 +                    {
 +                        set_ci_top_excls(nbs,
 +                                         nbl,
 +                                         shift == CENTRAL && gridi == gridj,
 +                                         gridj->na_c_2log,
 +                                         na_cj_2log,
 +                                         &(nbl->ci[nbl->nci]),
 +                                         excl);
 +                    }
 +                    else
 +                    {
 +                        set_sci_top_excls(nbs,
 +                                          nbl,
 +                                          shift == CENTRAL && gridi == gridj,
 +                                          gridj->na_c_2log,
 +                                          &(nbl->sci[nbl->nsci]),
 +                                          excl);
 +                    }
 +
 +                    /* Close this ci list */
 +                    if (nbl->bSimple)
 +                    {
 +                        close_ci_entry_simple(nbl);
 +                    }
 +                    else
 +                    {
 +                        close_ci_entry_supersub(nbl,
 +                                                nsubpair_max,
 +                                                progBal,min_ci_balanced,
 +                                                th,nth);
 +                    }
 +                }
 +            }
 +        }
 +
 +        if (bFBufferFlag && nbl->ncj > ncj_old_i)
 +        {
 +            work->buffer_flags.flag[(gridi->cell0+ci)>>gridi_flag_shift] = 1U<<th;
 +        }
 +    }
 +
 +    work->ndistc = ndistc;
 +
 +    nbs_cycle_stop(&work->cc[enbsCCsearch]);
 +
 +    if (debug)
 +    {
 +        fprintf(debug,"number of distance checks %d\n",ndistc);
 +        fprintf(debug,"ncpcheck %s %d\n",gridi==gridj ? "local" : "non-local",
 +                ncpcheck);
 +
 +        if (nbl->bSimple)
 +        {
 +            print_nblist_statistics_simple(debug,nbl,nbs,rlist);
 +        }
 +        else
 +        {
 +            print_nblist_statistics_supersub(debug,nbl,nbs,rlist);
 +        }
 +
 +    }
 +}
 +
 +static void reduce_buffer_flags(const nbnxn_search_t nbs,
 +                                int nsrc,
 +                                const nbnxn_buffer_flags_t *dest)
 +{
 +    int s,b;
 +    const unsigned *flag;
 +
 +    for(s=0; s<nsrc; s++)
 +    {
 +        flag = nbs->work[s].buffer_flags.flag;
 +
 +        for(b=0; b<dest->nflag; b++)
 +        {
 +            dest->flag[b] |= flag[b];
 +        }
 +    }
 +}
 +
 +static void print_reduction_cost(const nbnxn_buffer_flags_t *flags,int nout)
 +{
 +    int nelem,nkeep,ncopy,nred,b,c,out;
 +
 +    nelem = 0;
 +    nkeep = 0;
 +    ncopy = 0;
 +    nred  = 0;
 +    for(b=0; b<flags->nflag; b++)
 +    {
 +        if (flags->flag[b] == 1)
 +        {
 +            /* Only flag 0 is set, no copy of reduction required */
 +            nelem++;
 +            nkeep++;
 +        }
 +        else if (flags->flag[b] > 0)
 +        {
 +            c = 0;
 +            for(out=0; out<nout; out++)
 +            {
 +                if (flags->flag[b] & (1U<<out))
 +                {
 +                    c++;
 +                }
 +            }
 +            nelem += c;
 +            if (c == 1)
 +            {
 +                ncopy++;
 +            }
 +            else
 +            {
 +                nred += c;
 +            }
 +        }
 +    }
 +
 +    fprintf(debug,"nbnxn reduction: #flag %d #list %d elem %4.2f, keep %4.2f copy %4.2f red %4.2f\n",
 +            flags->nflag,nout,
 +            nelem/(double)(flags->nflag),
 +            nkeep/(double)(flags->nflag),
 +            ncopy/(double)(flags->nflag),
 +            nred/(double)(flags->nflag));
 +}
 +
 +/* Make a local or non-local pair-list, depending on iloc */
 +void nbnxn_make_pairlist(const nbnxn_search_t nbs,
 +                         nbnxn_atomdata_t *nbat,
 +                         const t_blocka *excl,
 +                         real rlist,
 +                         int min_ci_balanced,
 +                         nbnxn_pairlist_set_t *nbl_list,
 +                         int iloc,
 +                         int nb_kernel_type,
 +                         t_nrnb *nrnb)
 +{
 +    nbnxn_grid_t *gridi,*gridj;
 +    int nzi,zi,zj0,zj1,zj;
 +    int nsubpair_max;
 +    int th;
 +    int nnbl;
 +    nbnxn_pairlist_t **nbl;
 +    int ci_block;
 +    gmx_bool CombineNBLists;
 +    int np_tot,np_noq,np_hlj,nap;
 +
 +    nnbl            = nbl_list->nnbl;
 +    nbl             = nbl_list->nbl;
 +    CombineNBLists  = nbl_list->bCombined;
 +
 +    if (debug)
 +    {
 +        fprintf(debug,"ns making %d nblists\n", nnbl);
 +    }
 +
 +    nbat->bUseBufferFlags = (nbat->nout > 1);
 +    if (nbat->bUseBufferFlags && LOCAL_I(iloc))
 +    {
 +        init_buffer_flags(&nbat->buffer_flags,nbat->natoms);
 +    }
 +
 +    if (nbl_list->bSimple)
 +    {
 +        switch (nb_kernel_type)
 +        {
 +#ifdef GMX_NBNXN_SIMD_4XN
 +        case nbnxnk4xN_SIMD_4xN:
 +            nbs->icell_set_x = icell_set_x_simd_4xn;
 +            break;
 +#endif
 +#ifdef GMX_NBNXN_SIMD_2XNN
 +        case nbnxnk4xN_SIMD_2xNN:
 +            nbs->icell_set_x = icell_set_x_simd_2xnn;
 +            break;
 +#endif
 +        default:
 +            nbs->icell_set_x = icell_set_x_simple;
 +            break;
 +        }
 +    }
 +    else
 +    {
++#ifdef NBNXN_SEARCH_BB_SSE
 +        nbs->icell_set_x = icell_set_x_supersub_sse8;
 +#else
 +        nbs->icell_set_x = icell_set_x_supersub;
 +#endif
 +    }
 +
 +    if (LOCAL_I(iloc))
 +    {
 +        /* Only zone (grid) 0 vs 0 */
 +        nzi = 1;
 +        zj0 = 0;
 +        zj1 = 1;
 +    }
 +    else
 +    {
 +        nzi = nbs->zones->nizone;
 +    }
 +
 +    if (!nbl_list->bSimple && min_ci_balanced > 0)
 +    {
 +        nsubpair_max = get_nsubpair_max(nbs,iloc,rlist,min_ci_balanced);
 +    }
 +    else
 +    {
 +        nsubpair_max = 0;
 +    }
 +
 +    /* Clear all pair-lists */
 +    for(th=0; th<nnbl; th++)
 +    {
 +        clear_pairlist(nbl[th]);
 +    }
 +
 +    for(zi=0; zi<nzi; zi++)
 +    {
 +        gridi = &nbs->grid[zi];
 +
 +        if (NONLOCAL_I(iloc))
 +        {
 +            zj0 = nbs->zones->izone[zi].j0;
 +            zj1 = nbs->zones->izone[zi].j1;
 +            if (zi == 0)
 +            {
 +                zj0++;
 +            }
 +        }
 +        for(zj=zj0; zj<zj1; zj++)
 +        {
 +            gridj = &nbs->grid[zj];
 +
 +            if (debug)
 +            {
 +                fprintf(debug,"ns search grid %d vs %d\n",zi,zj);
 +            }
 +
 +            nbs_cycle_start(&nbs->cc[enbsCCsearch]);
 +
 +            if (nbl[0]->bSimple && !gridi->bSimple)
 +            {
 +                /* Hybrid list, determine blocking later */
 +                ci_block = 0;
 +            }
 +            else
 +            {
 +                ci_block = get_ci_block_size(gridi,nbs->DomDec,nnbl);
 +            }
 +
 +#pragma omp parallel for num_threads(nnbl) schedule(static)
 +            for(th=0; th<nnbl; th++)
 +            {
 +                if (nbat->bUseBufferFlags && zi == 0 && zj == 0)
 +                {
 +                    init_buffer_flags(&nbs->work[th].buffer_flags,nbat->natoms);
 +                }
 +
 +                if (CombineNBLists && th > 0)
 +                {
 +                    clear_pairlist(nbl[th]);
 +                }
 +
 +                /* Divide the i super cell equally over the nblists */
 +                nbnxn_make_pairlist_part(nbs,gridi,gridj,
 +                                         &nbs->work[th],nbat,excl,
 +                                         rlist,
 +                                         nb_kernel_type,
 +                                         ci_block,
 +                                         nbat->bUseBufferFlags,
 +                                         nsubpair_max,
 +                                         (LOCAL_I(iloc) || nbs->zones->n <= 2),
 +                                         min_ci_balanced,
 +                                         th,nnbl,
 +                                         nbl[th]);
 +            }
 +            nbs_cycle_stop(&nbs->cc[enbsCCsearch]);
 +
 +            np_tot = 0;
 +            np_noq = 0;
 +            np_hlj = 0;
 +            for(th=0; th<nnbl; th++)
 +            {
 +                inc_nrnb(nrnb,eNR_NBNXN_DIST2,nbs->work[th].ndistc);
 +
 +                if (nbl_list->bSimple)
 +                {
 +                    np_tot += nbl[th]->ncj;
 +                    np_noq += nbl[th]->work->ncj_noq;
 +                    np_hlj += nbl[th]->work->ncj_hlj;
 +                }
 +                else
 +                {
 +                    /* This count ignores potential subsequent pair pruning */
 +                    np_tot += nbl[th]->nci_tot;
 +                }
 +            }
 +            nap = nbl[0]->na_ci*nbl[0]->na_cj;
 +            nbl_list->natpair_ljq = (np_tot - np_noq)*nap - np_hlj*nap/2;
 +            nbl_list->natpair_lj  = np_noq*nap;
 +            nbl_list->natpair_q   = np_hlj*nap/2;
 +
 +            if (CombineNBLists && nnbl > 1)
 +            {
 +                nbs_cycle_start(&nbs->cc[enbsCCcombine]);
 +
 +                combine_nblists(nnbl-1,nbl+1,nbl[0]);
 +
 +                nbs_cycle_stop(&nbs->cc[enbsCCcombine]);
 +            }
 +        }
 +    }
 +
 +    if (nbat->bUseBufferFlags)
 +    {
 +        reduce_buffer_flags(nbs,nnbl,&nbat->buffer_flags);
 +    }
 +
 +    /*
 +    print_supersub_nsp("nsubpair",nbl[0],iloc);
 +    */
 +
 +    /* Special performance logging stuff (env.var. GMX_NBNXN_CYCLE) */
 +    if (LOCAL_I(iloc))
 +    {
 +        nbs->search_count++;
 +    }
 +    if (nbs->print_cycles &&
 +        (!nbs->DomDec || (nbs->DomDec && !LOCAL_I(iloc))) &&
 +        nbs->search_count % 100 == 0)
 +    {
 +        nbs_cycle_print(stderr,nbs);
 +    }
 +
 +    if (debug && (CombineNBLists && nnbl > 1))
 +    {
 +        if (nbl[0]->bSimple)
 +        {
 +            print_nblist_statistics_simple(debug,nbl[0],nbs,rlist);
 +        }
 +        else
 +        {
 +            print_nblist_statistics_supersub(debug,nbl[0],nbs,rlist);
 +        }
 +    }
 +
 +    if (debug)
 +    {
 +        if (gmx_debug_at)
 +        {
 +            if (nbl[0]->bSimple)
 +            {
 +                print_nblist_ci_cj(debug,nbl[0]);
 +            }
 +            else
 +            {
 +                print_nblist_sci_cj(debug,nbl[0]);
 +            }
 +        }
 +
 +        if (nbat->bUseBufferFlags)
 +        {
 +            print_reduction_cost(&nbat->buffer_flags,nnbl);
 +        }
 +    }
 +}
index 7e1a7fc21b56e513fe66ec5f1796ac775e4d3088,0000000000000000000000000000000000000000..c66086631a5851a488e3afc852996f0c1cf03f9e
mode 100644,000000..100644
--- /dev/null
@@@ -1,2648 -1,0 +1,2651 @@@
-         do_flood(fplog,cr,x,f,ed,box,step,bNS);
 +/* -*- mode: c; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4; c-file-style: "stroustrup"; -*-
 + *
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + *          GROningen MAchine for Chemical Simulations
 + *
 + *                        VERSION 3.2.0
 + * Written by David van der Spoel, Erik Lindahl, Berk Hess, and others.
 + * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 + * Copyright (c) 2001-2004, The GROMACS development team,
 + * check out http://www.gromacs.org for more information.
 +
 + * This program is free software; you can redistribute it and/or
 + * modify it under the terms of the GNU General Public License
 + * as published by the Free Software Foundation; either version 2
 + * of the License, or (at your option) any later version.
 + *
 + * If you want to redistribute modifications, please consider that
 + * scientific software is very special. Version control is crucial -
 + * bugs must be traceable. We will be happy to consider code for
 + * inclusion in the official distribution, but derived work must not
 + * be called official GROMACS. Details are found in the README & COPYING
 + * files - if they are missing, get the official version at www.gromacs.org.
 + *
 + * To help us fund GROMACS development, we humbly ask that you cite
 + * the papers on the package - you can find them in the top README file.
 + *
 + * For more info, check our website at http://www.gromacs.org
 + *
 + * And Hey:
 + * GROwing Monsters And Cloning Shrimps
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#ifdef GMX_CRAY_XT3
 +#include<catamount/dclock.h>
 +#endif
 +
 +
 +#include <stdio.h>
 +#include <time.h>
 +#ifdef HAVE_SYS_TIME_H
 +#include <sys/time.h>
 +#endif
 +#include <math.h>
 +#include "typedefs.h"
 +#include "string2.h"
 +#include "gmxfio.h"
 +#include "smalloc.h"
 +#include "names.h"
 +#include "confio.h"
 +#include "mvdata.h"
 +#include "txtdump.h"
 +#include "pbc.h"
 +#include "chargegroup.h"
 +#include "vec.h"
 +#include <time.h>
 +#include "nrnb.h"
 +#include "mshift.h"
 +#include "mdrun.h"
 +#include "sim_util.h"
 +#include "update.h"
 +#include "physics.h"
 +#include "main.h"
 +#include "mdatoms.h"
 +#include "force.h"
 +#include "bondf.h"
 +#include "pme.h"
 +#include "disre.h"
 +#include "orires.h"
 +#include "network.h"
 +#include "calcmu.h"
 +#include "constr.h"
 +#include "xvgr.h"
 +#include "trnio.h"
 +#include "xtcio.h"
 +#include "copyrite.h"
 +#include "pull_rotation.h"
 +#include "gmx_random.h"
 +#include "domdec.h"
 +#include "partdec.h"
 +#include "gmx_wallcycle.h"
 +#include "genborn.h"
 +#include "nbnxn_atomdata.h"
 +#include "nbnxn_search.h"
 +#include "nbnxn_kernels/nbnxn_kernel_ref.h"
 +#include "nbnxn_kernels/nbnxn_kernel_simd_4xn.h"
 +#include "nbnxn_kernels/nbnxn_kernel_simd_2xnn.h"
 +#include "nbnxn_kernels/nbnxn_kernel_gpu_ref.h"
 +
 +#ifdef GMX_LIB_MPI
 +#include <mpi.h>
 +#endif
 +#ifdef GMX_THREAD_MPI
 +#include "tmpi.h"
 +#endif
 +
 +#include "adress.h"
 +#include "qmmm.h"
 +
 +#include "nbnxn_cuda_data_mgmt.h"
 +#include "nbnxn_cuda/nbnxn_cuda.h"
 +
 +#if 0
 +typedef struct gmx_timeprint {
 +
 +} t_gmx_timeprint;
 +#endif
 +
 +/* Portable version of ctime_r implemented in src/gmxlib/string2.c, but we do not want it declared in public installed headers */
 +char *
 +gmx_ctime_r(const time_t *clock,char *buf, int n);
 +
 +
 +double
 +gmx_gettime()
 +{
 +#ifdef HAVE_GETTIMEOFDAY
 +      struct timeval t;
 +      double seconds;
 +
 +      gettimeofday(&t,NULL);
 +
 +      seconds = (double) t.tv_sec + 1e-6*(double)t.tv_usec;
 +
 +      return seconds;
 +#else
 +      double  seconds;
 +
 +      seconds = time(NULL);
 +
 +      return seconds;
 +#endif
 +}
 +
 +
 +#define difftime(end,start) ((double)(end)-(double)(start))
 +
 +void print_time(FILE *out,gmx_runtime_t *runtime,gmx_large_int_t step,
 +                t_inputrec *ir, t_commrec *cr)
 +{
 +    time_t finish;
 +    char   timebuf[STRLEN];
 +    double dt;
 +    char buf[48];
 +
 +#ifndef GMX_THREAD_MPI
 +    if (!PAR(cr))
 +#endif
 +    {
 +        fprintf(out,"\r");
 +    }
 +    fprintf(out,"step %s",gmx_step_str(step,buf));
 +    if ((step >= ir->nstlist))
 +    {
 +        runtime->last = gmx_gettime();
 +        dt = difftime(runtime->last,runtime->real);
 +        runtime->time_per_step = dt/(step - ir->init_step + 1);
 +
 +        dt = (ir->nsteps + ir->init_step - step)*runtime->time_per_step;
 +
 +        if (ir->nsteps >= 0)
 +        {
 +            if (dt >= 300)
 +            {
 +                finish = (time_t) (runtime->last + dt);
 +                gmx_ctime_r(&finish,timebuf,STRLEN);
 +                sprintf(buf,"%s",timebuf);
 +                buf[strlen(buf)-1]='\0';
 +                fprintf(out,", will finish %s",buf);
 +            }
 +            else
 +                fprintf(out,", remaining runtime: %5d s          ",(int)dt);
 +        }
 +        else
 +        {
 +            fprintf(out," performance: %.1f ns/day    ",
 +                    ir->delta_t/1000*24*60*60/runtime->time_per_step);
 +        }
 +    }
 +#ifndef GMX_THREAD_MPI
 +    if (PAR(cr))
 +    {
 +        fprintf(out,"\n");
 +    }
 +#endif
 +
 +    fflush(out);
 +}
 +
 +#ifdef NO_CLOCK
 +#define clock() -1
 +#endif
 +
 +static double set_proctime(gmx_runtime_t *runtime)
 +{
 +    double diff;
 +#ifdef GMX_CRAY_XT3
 +    double prev;
 +
 +    prev = runtime->proc;
 +    runtime->proc = dclock();
 +
 +    diff = runtime->proc - prev;
 +#else
 +    clock_t prev;
 +
 +    prev = runtime->proc;
 +    runtime->proc = clock();
 +
 +    diff = (double)(runtime->proc - prev)/(double)CLOCKS_PER_SEC;
 +#endif
 +    if (diff < 0)
 +    {
 +        /* The counter has probably looped, ignore this data */
 +        diff = 0;
 +    }
 +
 +    return diff;
 +}
 +
 +void runtime_start(gmx_runtime_t *runtime)
 +{
 +    runtime->real = gmx_gettime();
 +    runtime->proc          = 0;
 +    set_proctime(runtime);
 +    runtime->realtime      = 0;
 +    runtime->proctime      = 0;
 +    runtime->last          = 0;
 +    runtime->time_per_step = 0;
 +}
 +
 +void runtime_end(gmx_runtime_t *runtime)
 +{
 +    double now;
 +
 +    now = gmx_gettime();
 +
 +    runtime->proctime += set_proctime(runtime);
 +    runtime->realtime  = now - runtime->real;
 +    runtime->real      = now;
 +}
 +
 +void runtime_upd_proc(gmx_runtime_t *runtime)
 +{
 +    runtime->proctime += set_proctime(runtime);
 +}
 +
 +void print_date_and_time(FILE *fplog,int nodeid,const char *title,
 +                         const gmx_runtime_t *runtime)
 +{
 +    int i;
 +    char timebuf[STRLEN];
 +    char time_string[STRLEN];
 +    time_t tmptime;
 +
 +    if (fplog)
 +    {
 +        if (runtime != NULL)
 +        {
 +            tmptime = (time_t) runtime->real;
 +            gmx_ctime_r(&tmptime,timebuf,STRLEN);
 +        }
 +        else
 +        {
 +            tmptime = (time_t) gmx_gettime();
 +            gmx_ctime_r(&tmptime,timebuf,STRLEN);
 +        }
 +        for(i=0; timebuf[i]>=' '; i++)
 +        {
 +            time_string[i]=timebuf[i];
 +        }
 +        time_string[i]='\0';
 +
 +        fprintf(fplog,"%s on node %d %s\n",title,nodeid,time_string);
 +    }
 +}
 +
 +static void sum_forces(int start,int end,rvec f[],rvec flr[])
 +{
 +  int i;
 +
 +  if (gmx_debug_at) {
 +    pr_rvecs(debug,0,"fsr",f+start,end-start);
 +    pr_rvecs(debug,0,"flr",flr+start,end-start);
 +  }
 +  for(i=start; (i<end); i++)
 +    rvec_inc(f[i],flr[i]);
 +}
 +
 +/*
 + * calc_f_el calculates forces due to an electric field.
 + *
 + * force is kJ mol^-1 nm^-1 = e * kJ mol^-1 nm^-1 / e
 + *
 + * Et[] contains the parameters for the time dependent
 + * part of the field (not yet used).
 + * Ex[] contains the parameters for
 + * the spatial dependent part of the field. You can have cool periodic
 + * fields in principle, but only a constant field is supported
 + * now.
 + * The function should return the energy due to the electric field
 + * (if any) but for now returns 0.
 + *
 + * WARNING:
 + * There can be problems with the virial.
 + * Since the field is not self-consistent this is unavoidable.
 + * For neutral molecules the virial is correct within this approximation.
 + * For neutral systems with many charged molecules the error is small.
 + * But for systems with a net charge or a few charged molecules
 + * the error can be significant when the field is high.
 + * Solution: implement a self-consitent electric field into PME.
 + */
 +static void calc_f_el(FILE *fp,int  start,int homenr,
 +                      real charge[],rvec x[],rvec f[],
 +                      t_cosines Ex[],t_cosines Et[],double t)
 +{
 +    rvec Ext;
 +    real t0;
 +    int  i,m;
 +
 +    for(m=0; (m<DIM); m++)
 +    {
 +        if (Et[m].n > 0)
 +        {
 +            if (Et[m].n == 3)
 +            {
 +                t0 = Et[m].a[1];
 +                Ext[m] = cos(Et[m].a[0]*(t-t0))*exp(-sqr(t-t0)/(2.0*sqr(Et[m].a[2])));
 +            }
 +            else
 +            {
 +                Ext[m] = cos(Et[m].a[0]*t);
 +            }
 +        }
 +        else
 +        {
 +            Ext[m] = 1.0;
 +        }
 +        if (Ex[m].n > 0)
 +        {
 +            /* Convert the field strength from V/nm to MD-units */
 +            Ext[m] *= Ex[m].a[0]*FIELDFAC;
 +            for(i=start; (i<start+homenr); i++)
 +                f[i][m] += charge[i]*Ext[m];
 +        }
 +        else
 +        {
 +            Ext[m] = 0;
 +        }
 +    }
 +    if (fp != NULL)
 +    {
 +        fprintf(fp,"%10g  %10g  %10g  %10g #FIELD\n",t,
 +                Ext[XX]/FIELDFAC,Ext[YY]/FIELDFAC,Ext[ZZ]/FIELDFAC);
 +    }
 +}
 +
 +static void calc_virial(FILE *fplog,int start,int homenr,rvec x[],rvec f[],
 +                      tensor vir_part,t_graph *graph,matrix box,
 +                      t_nrnb *nrnb,const t_forcerec *fr,int ePBC)
 +{
 +  int i,j;
 +  tensor virtest;
 +
 +  /* The short-range virial from surrounding boxes */
 +  clear_mat(vir_part);
 +  calc_vir(fplog,SHIFTS,fr->shift_vec,fr->fshift,vir_part,ePBC==epbcSCREW,box);
 +  inc_nrnb(nrnb,eNR_VIRIAL,SHIFTS);
 +
 +  /* Calculate partial virial, for local atoms only, based on short range.
 +   * Total virial is computed in global_stat, called from do_md
 +   */
 +  f_calc_vir(fplog,start,start+homenr,x,f,vir_part,graph,box);
 +  inc_nrnb(nrnb,eNR_VIRIAL,homenr);
 +
 +  /* Add position restraint contribution */
 +  for(i=0; i<DIM; i++) {
 +    vir_part[i][i] += fr->vir_diag_posres[i];
 +  }
 +
 +  /* Add wall contribution */
 +  for(i=0; i<DIM; i++) {
 +    vir_part[i][ZZ] += fr->vir_wall_z[i];
 +  }
 +
 +  if (debug)
 +    pr_rvecs(debug,0,"vir_part",vir_part,DIM);
 +}
 +
 +static void posres_wrapper(FILE *fplog,
 +                           int flags,
 +                           gmx_bool bSepDVDL,
 +                           t_inputrec *ir,
 +                           t_nrnb *nrnb,
 +                           gmx_localtop_t *top,
 +                           matrix box,rvec x[],
 +                           rvec f[],
 +                           gmx_enerdata_t *enerd,
 +                           real *lambda,
 +                           t_forcerec *fr)
 +{
 +    t_pbc pbc;
 +    real  v,dvdl;
 +    int   i;
 +
 +    /* Position restraints always require full pbc */
 +    set_pbc(&pbc,ir->ePBC,box);
 +    dvdl = 0;
 +    v = posres(top->idef.il[F_POSRES].nr,top->idef.il[F_POSRES].iatoms,
 +               top->idef.iparams_posres,
 +               (const rvec*)x,fr->f_novirsum,fr->vir_diag_posres,
 +               ir->ePBC==epbcNONE ? NULL : &pbc,
 +               lambda[efptRESTRAINT],&dvdl,
 +               fr->rc_scaling,fr->ePBC,fr->posres_com,fr->posres_comB);
 +    if (bSepDVDL)
 +    {
 +        fprintf(fplog,sepdvdlformat,
 +                interaction_function[F_POSRES].longname,v,dvdl);
 +    }
 +    enerd->term[F_POSRES] += v;
 +    /* If just the force constant changes, the FEP term is linear,
 +     * but if k changes, it is not.
 +     */
 +    enerd->dvdl_nonlin[efptRESTRAINT] += dvdl;
 +    inc_nrnb(nrnb,eNR_POSRES,top->idef.il[F_POSRES].nr/2);
 +
 +    if ((ir->fepvals->n_lambda > 0) && (flags & GMX_FORCE_DHDL))
 +    {
 +        for(i=0; i<enerd->n_lambda; i++)
 +        {
 +            real dvdl_dum,lambda_dum;
 +
 +            lambda_dum = (i==0 ? lambda[efptRESTRAINT] : ir->fepvals->all_lambda[efptRESTRAINT][i-1]);
 +            v = posres(top->idef.il[F_POSRES].nr,top->idef.il[F_POSRES].iatoms,
 +                       top->idef.iparams_posres,
 +                       (const rvec*)x,NULL,NULL,
 +                       ir->ePBC==epbcNONE ? NULL : &pbc,lambda_dum,&dvdl,
 +                       fr->rc_scaling,fr->ePBC,fr->posres_com,fr->posres_comB);
 +            enerd->enerpart_lambda[i] += v;
 +        }
 +    }
 +}
 +
 +/* Compute the COM pull potential and accumulate its contributions.
 + *
 + * Adds the pull energy to enerd->term[F_COM_PULL], accumulates the
 + * restraint dV/dlambda into enerd->dvdl_lin[efptRESTRAINT], and lets
 + * pull_potential() add the pull forces into f and the virial into
 + * vir_force directly.
 + *
 + * fplog    log file; written to only when bSepDVDL is TRUE
 + * bSepDVDL when TRUE, print a separate dV/dl line for the pull term
 + * lambda   FEP lambda array indexed by the efpt enum; only
 + *          lambda[efptRESTRAINT] is used here
 + * t        current time, passed through to pull_potential()
 + */
 +static void pull_potential_wrapper(FILE *fplog,
 +                                   gmx_bool bSepDVDL,
 +                                   t_commrec *cr,
 +                                   t_inputrec *ir,
 +                                   matrix box,rvec x[],
 +                                   rvec f[],
 +                                   tensor vir_force,
 +                                   t_mdatoms *mdatoms,
 +                                   gmx_enerdata_t *enerd,
 +                                   real *lambda,
 +                                   double t)
 +{
 +    t_pbc  pbc;
 +    real   dvdl;
 +
 +    /* Calculate the center of mass forces, this requires communication,
 +     * which is why pull_potential is called close to other communication.
 +     * The virial contribution is calculated directly,
 +     * which is why we call pull_potential after calc_virial.
 +     */
 +    set_pbc(&pbc,ir->ePBC,box);
 +    dvdl = 0; 
 +    enerd->term[F_COM_PULL] +=
 +        pull_potential(ir->ePull,ir->pull,mdatoms,&pbc,
 +                       cr,t,lambda[efptRESTRAINT],x,f,vir_force,&dvdl);
 +    if (bSepDVDL)
 +    {
 +        fprintf(fplog,sepdvdlformat,"Com pull",enerd->term[F_COM_PULL],dvdl);
 +    }
 +    enerd->dvdl_lin[efptRESTRAINT] += dvdl;
 +}
 +
 +/* Receive long-range (PME mesh) forces, virial and energy from the
 + * separate PME nodes when PP/PME node splitting is in use.
 + *
 + * The received forces go into fr->f_novirsum and the reciprocal-space
 + * virial into fr->vir_el_recip; the energy is added to
 + * enerd->term[F_COUL_RECIP] and the dV/dl to enerd->dvdl_lin[efptCOUL].
 + * Also books the PP-during-PME and separate-PME cycle counts into the
 + * DD load-balancing statistics.
 + *
 + * fplog    log file; written to only when bSepDVDL is TRUE
 + * bSepDVDL when TRUE, print a separate dV/dl line for the PME mesh term
 + */
 +static void pme_receive_force_ener(FILE *fplog,
 +                                   gmx_bool bSepDVDL,
 +                                   t_commrec *cr,
 +                                   gmx_wallcycle_t wcycle,
 +                                   gmx_enerdata_t *enerd,
 +                                   t_forcerec *fr)
 +{
 +    /* NOTE(review): v is never used in this function */
 +    real   e,v,dvdl;    
 +    float  cycles_ppdpme,cycles_seppme;
 +
 +    cycles_ppdpme = wallcycle_stop(wcycle,ewcPPDURINGPME);
 +    dd_cycles_add(cr->dd,cycles_ppdpme,ddCyclPPduringPME);
 +
 +    /* In case of node-splitting, the PP nodes receive the long-range 
 +     * forces, virial and energy from the PME nodes here.
 +     */    
 +    wallcycle_start(wcycle,ewcPP_PMEWAITRECVF);
 +    dvdl = 0;
 +    gmx_pme_receive_f(cr,fr->f_novirsum,fr->vir_el_recip,&e,&dvdl,
 +                      &cycles_seppme);
 +    if (bSepDVDL)
 +    {
 +        fprintf(fplog,sepdvdlformat,"PME mesh",e,dvdl);
 +    }
 +    enerd->term[F_COUL_RECIP] += e;
 +    enerd->dvdl_lin[efptCOUL] += dvdl;
 +    if (wcycle)
 +    {
 +        dd_cycles_add(cr->dd,cycles_seppme,ddCyclPME);
 +    }
 +    wallcycle_stop(wcycle,ewcPP_PMEWAITRECVF);
 +}
 +
 +/* Print a diagnostic line for every home atom whose force norm exceeds
 + * pforce (or is NaN), giving the step, global atom number, position and
 + * force magnitude. Used to trace blowing-up systems.
 + *
 + * fp     output stream (the caller passes stderr)
 + * pforce force threshold; compared against |f| via the squared norms
 + */
 +static void print_large_forces(FILE *fp,t_mdatoms *md,t_commrec *cr,
 +                             gmx_large_int_t step,real pforce,rvec *x,rvec *f)
 +{
 +  int  i;
 +  real pf2,fn2;
 +  char buf[STEPSTRSIZE];
 +
 +  pf2 = sqr(pforce);
 +  for(i=md->start; i<md->start+md->homenr; i++) {
 +    fn2 = norm2(f[i]);
 +    /* We also catch NAN, if the compiler does not optimize this away. */
 +    /* fn2 != fn2 is only true for NaN (self-inequality idiom); note that
 +     * -ffast-math style options may remove this check. */
 +    if (fn2 >= pf2 || fn2 != fn2) {
 +      fprintf(fp,"step %s  atom %6d  x %8.3f %8.3f %8.3f  force %12.5e\n",
 +            gmx_step_str(step,buf),
 +            ddglatnr(cr->dd,i),x[i][XX],x[i][YY],x[i][ZZ],sqrt(fn2));
 +    }
 +  }
 +}
 +
 +/* Post-process forces after the main force calculation:
 + *  - spread the no-virial-sum (e.g. PME mesh) forces from virtual sites
 + *    onto their constructing atoms (may involve MPI communication),
 + *  - when the virial is requested, add fr->f_novirsum into f and add the
 + *    reciprocal-space contribution fr->vir_el_recip to vir_force,
 + *  - optionally print atoms with forces above fr->print_force.
 + *
 + * Only active parts run: the f_novirsum handling requires
 + * fr->bF_NoVirSum, and the virial additions require GMX_FORCE_VIRIAL
 + * in flags.
 + */
 +static void post_process_forces(FILE *fplog,
 +                                t_commrec *cr,
 +                                gmx_large_int_t step,
 +                                t_nrnb *nrnb,gmx_wallcycle_t wcycle,
 +                                gmx_localtop_t *top,
 +                                matrix box,rvec x[],
 +                                rvec f[],
 +                                tensor vir_force,
 +                                t_mdatoms *mdatoms,
 +                                t_graph *graph,
 +                                t_forcerec *fr,gmx_vsite_t *vsite,
 +                                int flags)
 +{
 +    if (fr->bF_NoVirSum)
 +    {
 +        if (vsite)
 +        {
 +            /* Spread the mesh force on virtual sites to the other particles... 
 +             * This is parallellized. MPI communication is performed
 +             * if the constructing atoms aren't local.
 +             */
 +            wallcycle_start(wcycle,ewcVSITESPREAD);
 +            spread_vsite_f(fplog,vsite,x,fr->f_novirsum,NULL,
 +                           (flags & GMX_FORCE_VIRIAL),fr->vir_el_recip,
 +                           nrnb,
 +                           &top->idef,fr->ePBC,fr->bMolPBC,graph,box,cr);
 +            wallcycle_stop(wcycle,ewcVSITESPREAD);
 +        }
 +        if (flags & GMX_FORCE_VIRIAL)
 +        {
 +            /* Now add the forces, this is local */
 +            if (fr->bDomDec)
 +            {
 +                sum_forces(0,fr->f_novirsum_n,f,fr->f_novirsum);
 +            }
 +            else
 +            {
 +                sum_forces(mdatoms->start,mdatoms->start+mdatoms->homenr,
 +                           f,fr->f_novirsum);
 +            }
 +            if (EEL_FULL(fr->eeltype))
 +            {
 +                /* Add the mesh contribution to the virial */
 +                m_add(vir_force,fr->vir_el_recip,vir_force);
 +            }
 +            if (debug)
 +            {
 +                pr_rvecs(debug,0,"vir_force",vir_force,DIM);
 +            }
 +        }
 +    }
 +    
 +    /* print_force < 0 means "never print" */
 +    if (fr->print_force >= 0)
 +    {
 +        print_large_forces(stderr,mdatoms,cr,step,fr->print_force,x,f);
 +    }
 +}
 +
 +/* Launch the non-bonded Verlet-scheme kernel for one interaction
 + * locality (local or non-local) and account the flops.
 + *
 + * Dispatches on the pair-list group's kernel_type: plain-C 4x4,
 + * SIMD 4xN, SIMD 2xNN, CUDA 8x8x8 (asynchronous launch; timed by the
 + * GPU machinery, so the ewcsNONBONDED sub-counter is skipped for it),
 + * or the plain-C GPU-emulation 8x8x8 kernel.
 + *
 + * flags     GMX_FORCE_* bits; returns immediately without computing
 + *           anything when GMX_FORCE_NONBONDED is not set
 + * ilocality eintLocal or eintNonlocal, selects fr->nbv->grp[]
 + * clearF    whether the kernel should clear the force output first
 + */
 +static void do_nb_verlet(t_forcerec *fr,
 +                         interaction_const_t *ic,
 +                         gmx_enerdata_t *enerd,
 +                         int flags, int ilocality,
 +                         int clearF,
 +                         t_nrnb *nrnb,
 +                         gmx_wallcycle_t wcycle)
 +{
 +    /* NOTE(review): nnbl and env are unused in this function */
 +    int     nnbl, kernel_type, enr_nbnxn_kernel_ljc, enr_nbnxn_kernel_lj;
 +    char    *env;
 +    nonbonded_verlet_group_t  *nbvg;
 +
 +    if (!(flags & GMX_FORCE_NONBONDED))
 +    {
 +        /* skip non-bonded calculation */
 +        return;
 +    }
 +
 +    nbvg = &fr->nbv->grp[ilocality];
 +
 +    /* CUDA kernel launch overhead is already timed separately */
 +    if (fr->cutoff_scheme != ecutsVERLET)
 +    {
 +        gmx_incons("Invalid cut-off scheme passed!");
 +    }
 +
 +    if (nbvg->kernel_type != nbnxnk8x8x8_CUDA)
 +    {
 +        wallcycle_sub_start(wcycle, ewcsNONBONDED);
 +    }
 +    switch (nbvg->kernel_type)
 +    {
 +        case nbnxnk4x4_PlainC:
 +            nbnxn_kernel_ref(&nbvg->nbl_lists,
 +                             nbvg->nbat, ic,
 +                             fr->shift_vec,
 +                             flags,
 +                             clearF,
 +                             fr->fshift[0],
 +                             enerd->grpp.ener[egCOULSR],
 +                             fr->bBHAM ?
 +                             enerd->grpp.ener[egBHAMSR] :
 +                             enerd->grpp.ener[egLJSR]);
 +            break;
 +        
 +        case nbnxnk4xN_SIMD_4xN:
 +            nbnxn_kernel_simd_4xn(&nbvg->nbl_lists,
 +                                  nbvg->nbat, ic,
 +                                  nbvg->ewald_excl,
 +                                  fr->shift_vec,
 +                                  flags,
 +                                  clearF,
 +                                  fr->fshift[0],
 +                                  enerd->grpp.ener[egCOULSR],
 +                                  fr->bBHAM ?
 +                                  enerd->grpp.ener[egBHAMSR] :
 +                                  enerd->grpp.ener[egLJSR]);
 +            break;
 +        case nbnxnk4xN_SIMD_2xNN:
 +            nbnxn_kernel_simd_2xnn(&nbvg->nbl_lists,
 +                                   nbvg->nbat, ic,
 +                                   nbvg->ewald_excl,
 +                                   fr->shift_vec,
 +                                   flags,
 +                                   clearF,
 +                                   fr->fshift[0],
 +                                   enerd->grpp.ener[egCOULSR],
 +                                   fr->bBHAM ?
 +                                   enerd->grpp.ener[egBHAMSR] :
 +                                   enerd->grpp.ener[egLJSR]);
 +            break;
 +
 +        case nbnxnk8x8x8_CUDA:
 +            /* Asynchronous launch; completion is waited for elsewhere */
 +            nbnxn_cuda_launch_kernel(fr->nbv->cu_nbv, nbvg->nbat, flags, ilocality);
 +            break;
 +
 +        case nbnxnk8x8x8_PlainC:
 +            nbnxn_kernel_gpu_ref(nbvg->nbl_lists.nbl[0],
 +                                 nbvg->nbat, ic,
 +                                 fr->shift_vec,
 +                                 flags,
 +                                 clearF,
 +                                 nbvg->nbat->out[0].f,
 +                                 fr->fshift[0],
 +                                 enerd->grpp.ener[egCOULSR],
 +                                 fr->bBHAM ?
 +                                 enerd->grpp.ener[egBHAMSR] :
 +                                 enerd->grpp.ener[egLJSR]);
 +            break;
 +
 +        default:
 +            gmx_incons("Invalid nonbonded kernel type passed!");
 +
 +    }
 +    if (nbvg->kernel_type != nbnxnk8x8x8_CUDA)
 +    {
 +        wallcycle_sub_stop(wcycle, ewcsNONBONDED);
 +    }
 +
 +    /* Select the LJ+Coulomb flop counter matching the electrostatics:
 +     * reaction-field/cut-off, tabulated Ewald, or analytical Ewald. */
 +    if (EEL_RF(ic->eeltype) || ic->eeltype == eelCUT)
 +    {
 +        enr_nbnxn_kernel_ljc = eNR_NBNXN_LJ_RF;
 +    }
 +    else if (nbvg->ewald_excl == ewaldexclTable)
 +    {
 +        enr_nbnxn_kernel_ljc = eNR_NBNXN_LJ_TAB;
 +    }
 +    else
 +    {
 +        enr_nbnxn_kernel_ljc = eNR_NBNXN_LJ_EWALD;
 +    }
 +    enr_nbnxn_kernel_lj = eNR_NBNXN_LJ;
 +    if (flags & GMX_FORCE_ENERGY)
 +    {
 +        /* In eNR_??? the nbnxn F+E kernels are always the F kernel + 1 */
 +        enr_nbnxn_kernel_ljc += 1;
 +        enr_nbnxn_kernel_lj  += 1;
 +    }
 +
 +    /* Book flops per pair class: LJ+Coulomb, LJ-only, Coulomb-only */
 +    inc_nrnb(nrnb,enr_nbnxn_kernel_ljc,
 +             nbvg->nbl_lists.natpair_ljq);
 +    inc_nrnb(nrnb,enr_nbnxn_kernel_lj,
 +             nbvg->nbl_lists.natpair_lj);
 +    inc_nrnb(nrnb,enr_nbnxn_kernel_ljc-eNR_NBNXN_LJ_RF+eNR_NBNXN_RF,
 +             nbvg->nbl_lists.natpair_q);
 +}
 +
 +void do_force_cutsVERLET(FILE *fplog,t_commrec *cr,
 +              t_inputrec *inputrec,
 +              gmx_large_int_t step,t_nrnb *nrnb,gmx_wallcycle_t wcycle,
 +              gmx_localtop_t *top,
 +              gmx_mtop_t *mtop,
 +              gmx_groups_t *groups,
 +              matrix box,rvec x[],history_t *hist,
 +              rvec f[],
 +              tensor vir_force,
 +              t_mdatoms *mdatoms,
 +              gmx_enerdata_t *enerd,t_fcdata *fcd,
 +              real *lambda,t_graph *graph,
 +              t_forcerec *fr, interaction_const_t *ic,
 +              gmx_vsite_t *vsite,rvec mu_tot,
 +              double t,FILE *field,gmx_edsam_t ed,
 +              gmx_bool bBornRadii,
 +              int flags)
 +{
 +    int     cg0,cg1,i,j;
 +    int     start,homenr;
 +    int     nb_kernel_type;
 +    double  mu[2*DIM];
 +    gmx_bool   bSepDVDL,bStateChanged,bNS,bFillGrid,bCalcCGCM,bBS;
 +    gmx_bool   bDoLongRange,bDoForces,bSepLRF,bUseGPU,bUseOrEmulGPU;
 +    gmx_bool   bDiffKernels=FALSE;
 +    matrix  boxs;
 +    rvec    vzero,box_diag;
 +    real    e,v,dvdl;
 +    float  cycles_pme,cycles_force;
 +    nonbonded_verlet_t *nbv;
 +
 +    cycles_force = 0;
 +    nbv = fr->nbv;
 +    nb_kernel_type = fr->nbv->grp[0].kernel_type;
 +
 +    start  = mdatoms->start;
 +    homenr = mdatoms->homenr;
 +
 +    bSepDVDL = (fr->bSepDVDL && do_per_step(step,inputrec->nstlog));
 +
 +    clear_mat(vir_force);
 +
 +    cg0 = 0;
 +    if (DOMAINDECOMP(cr))
 +    {
 +        cg1 = cr->dd->ncg_tot;
 +    }
 +    else
 +    {
 +        cg1 = top->cgs.nr;
 +    }
 +    if (fr->n_tpi > 0)
 +    {
 +        cg1--;
 +    }
 +
 +    bStateChanged = (flags & GMX_FORCE_STATECHANGED);
 +    bNS           = (flags & GMX_FORCE_NS) && (fr->bAllvsAll==FALSE); 
 +    bFillGrid     = (bNS && bStateChanged);
 +    bCalcCGCM     = (bFillGrid && !DOMAINDECOMP(cr));
 +    bDoLongRange  = (fr->bTwinRange && bNS && (flags & GMX_FORCE_DO_LR));
 +    bDoForces     = (flags & GMX_FORCE_FORCES);
 +    bSepLRF       = (bDoLongRange && bDoForces && (flags & GMX_FORCE_SEPLRF));
 +    bUseGPU       = fr->nbv->bUseGPU;
 +    bUseOrEmulGPU = bUseGPU || (nbv->grp[0].kernel_type == nbnxnk8x8x8_PlainC);
 +
 +    if (bStateChanged)
 +    {
 +        update_forcerec(fplog,fr,box);
 +
 +        if (NEED_MUTOT(*inputrec))
 +        {
 +            /* Calculate total (local) dipole moment in a temporary common array.
 +             * This makes it possible to sum them over nodes faster.
 +             */
 +            calc_mu(start,homenr,
 +                    x,mdatoms->chargeA,mdatoms->chargeB,mdatoms->nChargePerturbed,
 +                    mu,mu+DIM);
 +        }
 +    }
 +
 +    if (fr->ePBC != epbcNONE) { 
 +        /* Compute shift vectors every step,
 +         * because of pressure coupling or box deformation!
 +         */
 +        if ((flags & GMX_FORCE_DYNAMICBOX) && bStateChanged)
 +            calc_shifts(box,fr->shift_vec);
 +
 +        if (bCalcCGCM) { 
 +            put_atoms_in_box_omp(fr->ePBC,box,homenr,x);
 +            inc_nrnb(nrnb,eNR_SHIFTX,homenr);
 +        } 
 +        else if (EI_ENERGY_MINIMIZATION(inputrec->eI) && graph) {
 +            unshift_self(graph,box,x);
 +        }
 +    } 
 +
 +    nbnxn_atomdata_copy_shiftvec(flags & GMX_FORCE_DYNAMICBOX,
 +                                  fr->shift_vec,nbv->grp[0].nbat);
 +
 +#ifdef GMX_MPI
 +    if (!(cr->duty & DUTY_PME)) {
 +        /* Send particle coordinates to the pme nodes.
 +         * Since this is only implemented for domain decomposition
 +         * and domain decomposition does not use the graph,
 +         * we do not need to worry about shifting.
 +         */    
 +
 +        wallcycle_start(wcycle,ewcPP_PMESENDX);
 +
 +        bBS = (inputrec->nwall == 2);
 +        if (bBS) {
 +            copy_mat(box,boxs);
 +            svmul(inputrec->wall_ewald_zfac,boxs[ZZ],boxs[ZZ]);
 +        }
 +
 +        gmx_pme_send_x(cr,bBS ? boxs : box,x,
 +                       mdatoms->nChargePerturbed,lambda[efptCOUL],
 +                       (flags & (GMX_FORCE_VIRIAL | GMX_FORCE_ENERGY)),step);
 +
 +        wallcycle_stop(wcycle,ewcPP_PMESENDX);
 +    }
 +#endif /* GMX_MPI */
 +
 +    /* do gridding for pair search */
 +    if (bNS)
 +    {
 +        if (graph && bStateChanged)
 +        {
 +            /* Calculate intramolecular shift vectors to make molecules whole */
 +            mk_mshift(fplog,graph,fr->ePBC,box,x);
 +        }
 +
 +        clear_rvec(vzero);
 +        box_diag[XX] = box[XX][XX];
 +        box_diag[YY] = box[YY][YY];
 +        box_diag[ZZ] = box[ZZ][ZZ];
 +
 +        wallcycle_start(wcycle,ewcNS);
 +        if (!fr->bDomDec)
 +        {
 +            wallcycle_sub_start(wcycle,ewcsNBS_GRID_LOCAL);
 +            nbnxn_put_on_grid(nbv->nbs,fr->ePBC,box,
 +                              0,vzero,box_diag,
 +                              0,mdatoms->homenr,-1,fr->cginfo,x,
 +                              0,NULL,
 +                              nbv->grp[eintLocal].kernel_type,
 +                              nbv->grp[eintLocal].nbat);
 +            wallcycle_sub_stop(wcycle,ewcsNBS_GRID_LOCAL);
 +        }
 +        else
 +        {
 +            wallcycle_sub_start(wcycle,ewcsNBS_GRID_NONLOCAL);
 +            nbnxn_put_on_grid_nonlocal(nbv->nbs,domdec_zones(cr->dd),
 +                                       fr->cginfo,x,
 +                                       nbv->grp[eintNonlocal].kernel_type,
 +                                       nbv->grp[eintNonlocal].nbat);
 +            wallcycle_sub_stop(wcycle,ewcsNBS_GRID_NONLOCAL);
 +        }
 +
 +        if (nbv->ngrp == 1 ||
 +            nbv->grp[eintNonlocal].nbat == nbv->grp[eintLocal].nbat)
 +        {
 +            nbnxn_atomdata_set(nbv->grp[eintLocal].nbat,eatAll,
 +                                nbv->nbs,mdatoms,fr->cginfo);
 +        }
 +        else
 +        {
 +            nbnxn_atomdata_set(nbv->grp[eintLocal].nbat,eatLocal,
 +                                nbv->nbs,mdatoms,fr->cginfo);
 +            nbnxn_atomdata_set(nbv->grp[eintNonlocal].nbat,eatAll,
 +                                nbv->nbs,mdatoms,fr->cginfo);
 +        }
 +        wallcycle_stop(wcycle, ewcNS);
 +    }
 +
 +    /* initialize the GPU atom data and copy shift vector */
 +    if (bUseGPU)
 +    {
 +        if (bNS)
 +        {
 +            wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU_NB);
 +            nbnxn_cuda_init_atomdata(nbv->cu_nbv, nbv->grp[eintLocal].nbat);
 +            wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
 +        }
 +
 +        wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU_NB);
 +        nbnxn_cuda_upload_shiftvec(nbv->cu_nbv, nbv->grp[eintLocal].nbat);
 +        wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
 +    }
 +
 +    /* do local pair search */
 +    if (bNS)
 +    {
 +        wallcycle_start_nocount(wcycle,ewcNS);
 +        wallcycle_sub_start(wcycle,ewcsNBS_SEARCH_LOCAL);
 +        nbnxn_make_pairlist(nbv->nbs,nbv->grp[eintLocal].nbat,
 +                            &top->excls,
 +                            ic->rlist,
 +                            nbv->min_ci_balanced,
 +                            &nbv->grp[eintLocal].nbl_lists,
 +                            eintLocal,
 +                            nbv->grp[eintLocal].kernel_type,
 +                            nrnb);
 +        wallcycle_sub_stop(wcycle,ewcsNBS_SEARCH_LOCAL);
 +
 +        if (bUseGPU)
 +        {
 +            /* initialize local pair-list on the GPU */
 +            nbnxn_cuda_init_pairlist(nbv->cu_nbv,
 +                                     nbv->grp[eintLocal].nbl_lists.nbl[0],
 +                                     eintLocal);
 +        }
 +        wallcycle_stop(wcycle, ewcNS);
 +    }
 +    else
 +    {
 +        wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
 +        wallcycle_sub_start(wcycle, ewcsNB_X_BUF_OPS);
 +        nbnxn_atomdata_copy_x_to_nbat_x(nbv->nbs,eatLocal,FALSE,x,
 +                                        nbv->grp[eintLocal].nbat);
 +        wallcycle_sub_stop(wcycle, ewcsNB_X_BUF_OPS);
 +        wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
 +    }
 +
 +    if (bUseGPU)
 +    {
 +        wallcycle_start(wcycle,ewcLAUNCH_GPU_NB);
 +        /* launch local nonbonded F on GPU */
 +        do_nb_verlet(fr, ic, enerd, flags, eintLocal, enbvClearFNo,
 +                     nrnb, wcycle);
 +        wallcycle_stop(wcycle,ewcLAUNCH_GPU_NB);
 +    }
 +
 +    /* Communicate coordinates and sum dipole if necessary + 
 +       do non-local pair search */
 +    if (DOMAINDECOMP(cr))
 +    {
 +        bDiffKernels = (nbv->grp[eintNonlocal].kernel_type !=
 +                        nbv->grp[eintLocal].kernel_type);
 +
 +        if (bDiffKernels)
 +        {
 +            /* With GPU+CPU non-bonded calculations we need to copy
 +             * the local coordinates to the non-local nbat struct
 +             * (in CPU format) as the non-local kernel call also
 +             * calculates the local - non-local interactions.
 +             */
 +            wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
 +            wallcycle_sub_start(wcycle, ewcsNB_X_BUF_OPS);
 +            nbnxn_atomdata_copy_x_to_nbat_x(nbv->nbs,eatLocal,TRUE,x,
 +                                             nbv->grp[eintNonlocal].nbat);
 +            wallcycle_sub_stop(wcycle, ewcsNB_X_BUF_OPS);
 +            wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
 +        }
 +
 +        if (bNS)
 +        {
 +            wallcycle_start_nocount(wcycle,ewcNS);
 +            wallcycle_sub_start(wcycle,ewcsNBS_SEARCH_NONLOCAL);
 +
 +            if (bDiffKernels)
 +            {
 +                nbnxn_grid_add_simple(nbv->nbs,nbv->grp[eintNonlocal].nbat);
 +            }
 +
 +            nbnxn_make_pairlist(nbv->nbs,nbv->grp[eintNonlocal].nbat,
 +                                &top->excls,
 +                                ic->rlist,
 +                                nbv->min_ci_balanced,
 +                                &nbv->grp[eintNonlocal].nbl_lists,
 +                                eintNonlocal,
 +                                nbv->grp[eintNonlocal].kernel_type,
 +                                nrnb);
 +
 +            wallcycle_sub_stop(wcycle,ewcsNBS_SEARCH_NONLOCAL);
 +
 +            if (nbv->grp[eintNonlocal].kernel_type == nbnxnk8x8x8_CUDA)
 +            {
 +                /* initialize non-local pair-list on the GPU */
 +                nbnxn_cuda_init_pairlist(nbv->cu_nbv,
 +                                         nbv->grp[eintNonlocal].nbl_lists.nbl[0],
 +                                         eintNonlocal);
 +            }
 +            wallcycle_stop(wcycle,ewcNS);
 +        } 
 +        else
 +        {
 +            wallcycle_start(wcycle,ewcMOVEX);
 +            dd_move_x(cr->dd,box,x);
 +
 +            /* When we don't need the total dipole we sum it in global_stat */
 +            if (bStateChanged && NEED_MUTOT(*inputrec))
 +            {
 +                gmx_sumd(2*DIM,mu,cr);
 +            }
 +            wallcycle_stop(wcycle,ewcMOVEX);
 +
 +            wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
 +            wallcycle_sub_start(wcycle, ewcsNB_X_BUF_OPS);
 +            nbnxn_atomdata_copy_x_to_nbat_x(nbv->nbs,eatNonlocal,FALSE,x,
 +                                            nbv->grp[eintNonlocal].nbat);
 +            wallcycle_sub_stop(wcycle, ewcsNB_X_BUF_OPS);
 +            cycles_force += wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
 +        }
 +
 +        if (bUseGPU && !bDiffKernels)
 +        { 
 +            wallcycle_start(wcycle,ewcLAUNCH_GPU_NB);
 +            /* launch non-local nonbonded F on GPU */
 +            do_nb_verlet(fr, ic, enerd, flags, eintNonlocal, enbvClearFNo,
 +                         nrnb, wcycle);
 +            cycles_force += wallcycle_stop(wcycle,ewcLAUNCH_GPU_NB);
 +        }
 +    }
 +
 +    if (bUseGPU)
 +    {
 +        /* launch D2H copy-back F */
 +        wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU_NB);
 +        if (DOMAINDECOMP(cr) && !bDiffKernels)
 +        {
 +            nbnxn_cuda_launch_cpyback(nbv->cu_nbv, nbv->grp[eintNonlocal].nbat,
 +                                      flags, eatNonlocal);
 +        }
 +        nbnxn_cuda_launch_cpyback(nbv->cu_nbv, nbv->grp[eintLocal].nbat,
 +                                  flags, eatLocal);
 +        cycles_force += wallcycle_stop(wcycle,ewcLAUNCH_GPU_NB);
 +    }
 +
 +    if (bStateChanged && NEED_MUTOT(*inputrec))
 +    {
 +        if (PAR(cr))
 +        {
 +            gmx_sumd(2*DIM,mu,cr);
 +        } 
 +
 +        for(i=0; i<2; i++)
 +        {
 +            for(j=0;j<DIM;j++)
 +            {
 +                fr->mu_tot[i][j] = mu[i*DIM + j];
 +            }
 +        }
 +    }
 +    if (fr->efep == efepNO)
 +    {
 +        copy_rvec(fr->mu_tot[0],mu_tot);
 +    }
 +    else
 +    {
 +        for(j=0; j<DIM; j++)
 +        {
 +            mu_tot[j] =
 +                (1.0 - lambda[efptCOUL])*fr->mu_tot[0][j] +
 +                lambda[efptCOUL]*fr->mu_tot[1][j];
 +        }
 +    }
 +
 +    /* Reset energies */
 +    reset_enerdata(&(inputrec->opts),fr,bNS,enerd,MASTER(cr));
 +    clear_rvecs(SHIFTS,fr->fshift);
 +
 +    if (DOMAINDECOMP(cr))
 +    {
 +        if (!(cr->duty & DUTY_PME))
 +        {
 +            wallcycle_start(wcycle,ewcPPDURINGPME);
 +            dd_force_flop_start(cr->dd,nrnb);
 +        }
 +    }
 +    
 +    /* Start the force cycle counter.
 +     * This counter is stopped in do_forcelow_level.
 +     * No parallel communication should occur while this counter is running,
 +     * since that will interfere with the dynamic load balancing.
 +     */
 +    wallcycle_start(wcycle,ewcFORCE);
 +    if (bDoForces)
 +    {
 +        /* Reset forces for which the virial is calculated separately:
 +         * PME/Ewald forces if necessary */
 +        if (fr->bF_NoVirSum) 
 +        {
 +            if (flags & GMX_FORCE_VIRIAL)
 +            {
 +                fr->f_novirsum = fr->f_novirsum_alloc;
 +                if (fr->bDomDec)
 +                {
 +                    clear_rvecs(fr->f_novirsum_n,fr->f_novirsum);
 +                }
 +                else
 +                {
 +                    clear_rvecs(homenr,fr->f_novirsum+start);
 +                }
 +            }
 +            else
 +            {
 +                /* We are not calculating the pressure so we do not need
 +                 * a separate array for forces that do not contribute
 +                 * to the pressure.
 +                 */
 +                fr->f_novirsum = f;
 +            }
 +        }
 +
 +        /* Clear the short- and long-range forces */
 +        clear_rvecs(fr->natoms_force_constr,f);
 +        if(bSepLRF && do_per_step(step,inputrec->nstcalclr))
 +        {
 +            clear_rvecs(fr->natoms_force_constr,fr->f_twin);
 +        }
 +        
 +        clear_rvec(fr->vir_diag_posres);
 +    }
 +    if (inputrec->ePull == epullCONSTRAINT)
 +    {
 +        clear_pull_forces(inputrec->pull);
 +    }
 +
 +    /* update QMMMrec, if necessary */
 +    if(fr->bQMMM)
 +    {
 +        update_QMMMrec(cr,fr,x,mdatoms,box,top);
 +    }
 +
 +    if ((flags & GMX_FORCE_BONDED) && top->idef.il[F_POSRES].nr > 0)
 +    {
 +        posres_wrapper(fplog,flags,bSepDVDL,inputrec,nrnb,top,box,x,
 +                       f,enerd,lambda,fr);
 +    }
 +
 +    /* Compute the bonded and non-bonded energies and optionally forces */    
 +    do_force_lowlevel(fplog,step,fr,inputrec,&(top->idef),
 +                      cr,nrnb,wcycle,mdatoms,&(inputrec->opts),
 +                      x,hist,f, bSepLRF ? fr->f_twin : f,enerd,fcd,mtop,top,fr->born,
 +                      &(top->atomtypes),bBornRadii,box,
 +                      inputrec->fepvals,lambda,graph,&(top->excls),fr->mu_tot,
 +                      flags, &cycles_pme);
 +
 +    if(bSepLRF)
 +    {
 +        if (do_per_step(step,inputrec->nstcalclr))
 +        {
 +            /* Add the long range forces to the short range forces */
 +            for(i=0; i<fr->natoms_force_constr; i++)
 +            {
 +                rvec_add(fr->f_twin[i],f[i],f[i]);
 +            }
 +        }
 +    }
 +    
 +    if (!bUseOrEmulGPU)
 +    {
 +        /* Maybe we should move this into do_force_lowlevel */
 +        do_nb_verlet(fr, ic, enerd, flags, eintLocal, enbvClearFYes,
 +                     nrnb, wcycle);
 +    }
 +        
 +
 +    if (!bUseOrEmulGPU || bDiffKernels)
 +    {
 +        int aloc;
 +
 +        if (DOMAINDECOMP(cr))
 +        {
 +            do_nb_verlet(fr, ic, enerd, flags, eintNonlocal,
 +                         bDiffKernels ? enbvClearFYes : enbvClearFNo,
 +                         nrnb, wcycle);
 +        }
 +
 +        if (!bUseOrEmulGPU)
 +        {
 +            aloc = eintLocal;
 +        }
 +        else
 +        {
 +            aloc = eintNonlocal;
 +        }
 +
 +        /* Add all the non-bonded force to the normal force array.
 +         * This can be split into a local a non-local part when overlapping
 +         * communication with calculation with domain decomposition.
 +         */
 +        cycles_force += wallcycle_stop(wcycle,ewcFORCE);
 +        wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
 +        wallcycle_sub_start(wcycle, ewcsNB_F_BUF_OPS);
 +        nbnxn_atomdata_add_nbat_f_to_f(nbv->nbs,eatAll,nbv->grp[aloc].nbat,f);
 +        wallcycle_sub_stop(wcycle, ewcsNB_F_BUF_OPS);
 +        cycles_force += wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
 +        wallcycle_start_nocount(wcycle,ewcFORCE);
 +
 +        /* if there are multiple fshift output buffers reduce them */
 +        if ((flags & GMX_FORCE_VIRIAL) &&
 +            nbv->grp[aloc].nbl_lists.nnbl > 1)
 +        {
 +            nbnxn_atomdata_add_nbat_fshift_to_fshift(nbv->grp[aloc].nbat,
 +                                                      fr->fshift);
 +        }
 +    }
 +    
 +    cycles_force += wallcycle_stop(wcycle,ewcFORCE);
 +    
 +    if (ed)
 +    {
-         do_flood(fplog,cr,x,f,ed,box,step,bNS);
++        do_flood(cr,inputrec,x,f,ed,box,step,bNS);
 +    }
 +
 +    if (bUseOrEmulGPU && !bDiffKernels)
 +    {
 +        /* wait for non-local forces (or calculate in emulation mode) */
 +        if (DOMAINDECOMP(cr))
 +        {
 +            if (bUseGPU)
 +            {
 +                wallcycle_start(wcycle,ewcWAIT_GPU_NB_NL);
 +                nbnxn_cuda_wait_gpu(nbv->cu_nbv,
 +                                    nbv->grp[eintNonlocal].nbat,
 +                                    flags, eatNonlocal,
 +                                    enerd->grpp.ener[egLJSR], enerd->grpp.ener[egCOULSR],
 +                                    fr->fshift);
 +                cycles_force += wallcycle_stop(wcycle,ewcWAIT_GPU_NB_NL);
 +            }
 +            else
 +            {
 +                wallcycle_start_nocount(wcycle,ewcFORCE);
 +                do_nb_verlet(fr, ic, enerd, flags, eintNonlocal, enbvClearFYes,
 +                             nrnb, wcycle);
 +                cycles_force += wallcycle_stop(wcycle,ewcFORCE);
 +            }            
 +            wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
 +            wallcycle_sub_start(wcycle, ewcsNB_F_BUF_OPS);
 +            /* skip the reduction if there was no non-local work to do */
 +            if (nbv->grp[eintLocal].nbl_lists.nbl[0]->nsci > 0)
 +            {
 +                nbnxn_atomdata_add_nbat_f_to_f(nbv->nbs,eatNonlocal,
 +                                               nbv->grp[eintNonlocal].nbat,f);
 +            }
 +            wallcycle_sub_stop(wcycle, ewcsNB_F_BUF_OPS);
 +            cycles_force += wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
 +        }
 +    }
 +
 +    if (bDoForces)
 +    {
 +        /* Communicate the forces */
 +        if (PAR(cr))
 +        {
 +            wallcycle_start(wcycle,ewcMOVEF);
 +            if (DOMAINDECOMP(cr))
 +            {
 +                dd_move_f(cr->dd,f,fr->fshift);
 +                /* Do we need to communicate the separate force array
 +                 * for terms that do not contribute to the single sum virial?
 +                 * Position restraints and electric fields do not introduce
 +                 * inter-cg forces, only full electrostatics methods do.
 +                 * When we do not calculate the virial, fr->f_novirsum = f,
 +                 * so we have already communicated these forces.
 +                 */
 +                if (EEL_FULL(fr->eeltype) && cr->dd->n_intercg_excl &&
 +                    (flags & GMX_FORCE_VIRIAL))
 +                {
 +                    dd_move_f(cr->dd,fr->f_novirsum,NULL);
 +                }
 +                if (bSepLRF)
 +                {
 +                    /* We should not update the shift forces here,
 +                     * since f_twin is already included in f.
 +                     */
 +                    dd_move_f(cr->dd,fr->f_twin,NULL);
 +                }
 +            }
 +            wallcycle_stop(wcycle,ewcMOVEF);
 +        }
 +    }
 + 
 +    if (bUseOrEmulGPU)
 +    {
 +        /* wait for local forces (or calculate in emulation mode) */
 +        if (bUseGPU)
 +        {
 +            wallcycle_start(wcycle,ewcWAIT_GPU_NB_L);
 +            nbnxn_cuda_wait_gpu(nbv->cu_nbv,
 +                                nbv->grp[eintLocal].nbat,
 +                                flags, eatLocal,
 +                                enerd->grpp.ener[egLJSR], enerd->grpp.ener[egCOULSR],
 +                                fr->fshift);
 +            wallcycle_stop(wcycle,ewcWAIT_GPU_NB_L);
 +
 +            /* now clear the GPU outputs while we finish the step on the CPU */
++
++            wallcycle_start_nocount(wcycle,ewcLAUNCH_GPU_NB);
 +            nbnxn_cuda_clear_outputs(nbv->cu_nbv, flags);
++            wallcycle_stop(wcycle,ewcLAUNCH_GPU_NB);
 +        }
 +        else
 +        {            
 +            wallcycle_start_nocount(wcycle,ewcFORCE);
 +            do_nb_verlet(fr, ic, enerd, flags, eintLocal,
 +                         DOMAINDECOMP(cr) ? enbvClearFNo : enbvClearFYes,
 +                         nrnb, wcycle);
 +            wallcycle_stop(wcycle,ewcFORCE);
 +        }
 +        wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
 +        wallcycle_sub_start(wcycle, ewcsNB_F_BUF_OPS);
 +        if (nbv->grp[eintLocal].nbl_lists.nbl[0]->nsci > 0)
 +        {
 +            /* skip the reduction if there was no non-local work to do */
 +            nbnxn_atomdata_add_nbat_f_to_f(nbv->nbs,eatLocal,
 +                                           nbv->grp[eintLocal].nbat,f);
 +        }
 +        wallcycle_sub_stop(wcycle, ewcsNB_F_BUF_OPS);
 +        wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
 +    }
 +    
 +    if (DOMAINDECOMP(cr))
 +    {
 +        dd_force_flop_stop(cr->dd,nrnb);
 +        if (wcycle)
 +        {
 +            dd_cycles_add(cr->dd,cycles_force-cycles_pme,ddCyclF);
 +        }
 +    }
 +
 +    if (bDoForces)
 +    {
 +        if (IR_ELEC_FIELD(*inputrec))
 +        {
 +            /* Compute forces due to electric field */
 +            calc_f_el(MASTER(cr) ? field : NULL,
 +                      start,homenr,mdatoms->chargeA,x,fr->f_novirsum,
 +                      inputrec->ex,inputrec->et,t);
 +        }
 +
 +        /* If we have NoVirSum forces, but we do not calculate the virial,
 +         * we sum fr->f_novirum=f later.
 +         */
 +        if (vsite && !(fr->bF_NoVirSum && !(flags & GMX_FORCE_VIRIAL)))
 +        {
 +            wallcycle_start(wcycle,ewcVSITESPREAD);
 +            spread_vsite_f(fplog,vsite,x,f,fr->fshift,FALSE,NULL,nrnb,
 +                           &top->idef,fr->ePBC,fr->bMolPBC,graph,box,cr);
 +            wallcycle_stop(wcycle,ewcVSITESPREAD);
 +
 +            if (bSepLRF)
 +            {
 +                wallcycle_start(wcycle,ewcVSITESPREAD);
 +                spread_vsite_f(fplog,vsite,x,fr->f_twin,NULL,FALSE,NULL,
 +                               nrnb,
 +                               &top->idef,fr->ePBC,fr->bMolPBC,graph,box,cr);
 +                wallcycle_stop(wcycle,ewcVSITESPREAD);
 +            }
 +        }
 +
 +        if (flags & GMX_FORCE_VIRIAL)
 +        {
 +            /* Calculation of the virial must be done after vsites! */
 +            calc_virial(fplog,mdatoms->start,mdatoms->homenr,x,f,
 +                        vir_force,graph,box,nrnb,fr,inputrec->ePBC);
 +        }
 +    }
 +
 +    if (inputrec->ePull == epullUMBRELLA || inputrec->ePull == epullCONST_F)
 +    {
 +        pull_potential_wrapper(fplog,bSepDVDL,cr,inputrec,box,x,
 +                               f,vir_force,mdatoms,enerd,lambda,t);
 +    }
 +
 +    if (PAR(cr) && !(cr->duty & DUTY_PME))
 +    {
 +        /* In case of node-splitting, the PP nodes receive the long-range 
 +         * forces, virial and energy from the PME nodes here.
 +         */    
 +        pme_receive_force_ener(fplog,bSepDVDL,cr,wcycle,enerd,fr);
 +    }
 +
 +    if (bDoForces)
 +    {
 +        post_process_forces(fplog,cr,step,nrnb,wcycle,
 +                            top,box,x,f,vir_force,mdatoms,graph,fr,vsite,
 +                            flags);
 +    }
 +    
 +    /* Sum the potential energy terms from group contributions */
 +    sum_epot(&(inputrec->opts),&(enerd->grpp),enerd->term);
 +}
 +
 +void do_force_cutsGROUP(FILE *fplog,t_commrec *cr,
 +              t_inputrec *inputrec,
 +              gmx_large_int_t step,t_nrnb *nrnb,gmx_wallcycle_t wcycle,
 +              gmx_localtop_t *top,
 +              gmx_mtop_t *mtop,
 +              gmx_groups_t *groups,
 +              matrix box,rvec x[],history_t *hist,
 +              rvec f[],
 +              tensor vir_force,
 +              t_mdatoms *mdatoms,
 +              gmx_enerdata_t *enerd,t_fcdata *fcd,
 +              real *lambda,t_graph *graph,
 +              t_forcerec *fr,gmx_vsite_t *vsite,rvec mu_tot,
 +              double t,FILE *field,gmx_edsam_t ed,
 +              gmx_bool bBornRadii,
 +              int flags)
 +{
 +    int    cg0,cg1,i,j;
 +    int    start,homenr;
 +    double mu[2*DIM];
 +    gmx_bool   bSepDVDL,bStateChanged,bNS,bFillGrid,bCalcCGCM,bBS;
 +    gmx_bool   bDoLongRangeNS,bDoForces,bDoPotential,bSepLRF;
 +    gmx_bool   bDoAdressWF;
 +    matrix boxs;
 +    rvec   vzero,box_diag;
 +    real   e,v,dvdlambda[efptNR];
 +    t_pbc  pbc;
 +    float  cycles_pme,cycles_force;
 +
 +    start  = mdatoms->start;
 +    homenr = mdatoms->homenr;
 +
 +    bSepDVDL = (fr->bSepDVDL && do_per_step(step,inputrec->nstlog));
 +
 +    clear_mat(vir_force);
 +
 +    if (PARTDECOMP(cr))
 +    {
 +        pd_cg_range(cr,&cg0,&cg1);
 +    }
 +    else
 +    {
 +        cg0 = 0;
 +        if (DOMAINDECOMP(cr))
 +        {
 +            cg1 = cr->dd->ncg_tot;
 +        }
 +        else
 +        {
 +            cg1 = top->cgs.nr;
 +        }
 +        if (fr->n_tpi > 0)
 +        {
 +            cg1--;
 +        }
 +    }
 +
 +    bStateChanged  = (flags & GMX_FORCE_STATECHANGED);
 +    bNS            = (flags & GMX_FORCE_NS) && (fr->bAllvsAll==FALSE);
 +    /* Should we update the long-range neighborlists at this step? */
 +    bDoLongRangeNS = fr->bTwinRange && bNS;
 +    /* Should we perform the long-range nonbonded evaluation inside the neighborsearching? */
 +    bFillGrid      = (bNS && bStateChanged);
 +    bCalcCGCM      = (bFillGrid && !DOMAINDECOMP(cr));
 +    bDoForces      = (flags & GMX_FORCE_FORCES);
 +    bDoPotential   = (flags & GMX_FORCE_ENERGY);
 +    bSepLRF        = ((inputrec->nstcalclr>1) && bDoForces &&
 +                      (flags & GMX_FORCE_SEPLRF) && (flags & GMX_FORCE_DO_LR));
 +
 +    /* should probably move this to the forcerec since it doesn't change */
 +    bDoAdressWF   = ((fr->adress_type!=eAdressOff));
 +
 +    if (bStateChanged)
 +    {
 +        update_forcerec(fplog,fr,box);
 +
 +        if (NEED_MUTOT(*inputrec))
 +        {
 +            /* Calculate total (local) dipole moment in a temporary common array.
 +             * This makes it possible to sum them over nodes faster.
 +             */
 +            calc_mu(start,homenr,
 +                    x,mdatoms->chargeA,mdatoms->chargeB,mdatoms->nChargePerturbed,
 +                    mu,mu+DIM);
 +        }
 +    }
 +
 +    if (fr->ePBC != epbcNONE) { 
 +        /* Compute shift vectors every step,
 +         * because of pressure coupling or box deformation!
 +         */
 +        if ((flags & GMX_FORCE_DYNAMICBOX) && bStateChanged)
 +            calc_shifts(box,fr->shift_vec);
 +
 +        if (bCalcCGCM) { 
 +            put_charge_groups_in_box(fplog,cg0,cg1,fr->ePBC,box,
 +                    &(top->cgs),x,fr->cg_cm);
 +            inc_nrnb(nrnb,eNR_CGCM,homenr);
 +            inc_nrnb(nrnb,eNR_RESETX,cg1-cg0);
 +        } 
 +        else if (EI_ENERGY_MINIMIZATION(inputrec->eI) && graph) {
 +            unshift_self(graph,box,x);
 +        }
 +    } 
 +    else if (bCalcCGCM) {
 +        calc_cgcm(fplog,cg0,cg1,&(top->cgs),x,fr->cg_cm);
 +        inc_nrnb(nrnb,eNR_CGCM,homenr);
 +    }
 +
 +    if (bCalcCGCM) {
 +        if (PAR(cr)) {
 +            move_cgcm(fplog,cr,fr->cg_cm);
 +        }
 +        if (gmx_debug_at)
 +            pr_rvecs(debug,0,"cgcm",fr->cg_cm,top->cgs.nr);
 +    }
 +
 +#ifdef GMX_MPI
 +    if (!(cr->duty & DUTY_PME)) {
 +        /* Send particle coordinates to the pme nodes.
 +         * Since this is only implemented for domain decomposition
 +         * and domain decomposition does not use the graph,
 +         * we do not need to worry about shifting.
 +         */    
 +
 +        wallcycle_start(wcycle,ewcPP_PMESENDX);
 +
 +        bBS = (inputrec->nwall == 2);
 +        if (bBS) {
 +            copy_mat(box,boxs);
 +            svmul(inputrec->wall_ewald_zfac,boxs[ZZ],boxs[ZZ]);
 +        }
 +
 +        gmx_pme_send_x(cr,bBS ? boxs : box,x,
 +                       mdatoms->nChargePerturbed,lambda[efptCOUL],
 +                       (flags & (GMX_FORCE_VIRIAL | GMX_FORCE_ENERGY)),step);
 +
 +        wallcycle_stop(wcycle,ewcPP_PMESENDX);
 +    }
 +#endif /* GMX_MPI */
 +
 +    /* Communicate coordinates and sum dipole if necessary */
 +    if (PAR(cr))
 +    {
 +        wallcycle_start(wcycle,ewcMOVEX);
 +        if (DOMAINDECOMP(cr))
 +        {
 +            dd_move_x(cr->dd,box,x);
 +        }
 +        else
 +        {
 +            move_x(fplog,cr,GMX_LEFT,GMX_RIGHT,x,nrnb);
 +        }
 +        wallcycle_stop(wcycle,ewcMOVEX);
 +    }
 +
 +    /* update adress weight beforehand */
 +    if(bStateChanged && bDoAdressWF)
 +    {
 +        /* need pbc for adress weight calculation with pbc_dx */
 +        set_pbc(&pbc,inputrec->ePBC,box);
 +        if(fr->adress_site == eAdressSITEcog)
 +        {
 +            update_adress_weights_cog(top->idef.iparams,top->idef.il,x,fr,mdatoms,
 +                                      inputrec->ePBC==epbcNONE ? NULL : &pbc);
 +        }
 +        else if (fr->adress_site == eAdressSITEcom)
 +        {
 +            update_adress_weights_com(fplog,cg0,cg1,&(top->cgs),x,fr,mdatoms,
 +                                      inputrec->ePBC==epbcNONE ? NULL : &pbc);
 +        }
 +        else if (fr->adress_site == eAdressSITEatomatom){
 +            update_adress_weights_atom_per_atom(cg0,cg1,&(top->cgs),x,fr,mdatoms,
 +                                                inputrec->ePBC==epbcNONE ? NULL : &pbc);
 +        }
 +        else
 +        {
 +            update_adress_weights_atom(cg0,cg1,&(top->cgs),x,fr,mdatoms,
 +                                       inputrec->ePBC==epbcNONE ? NULL : &pbc);
 +        }
 +    }
 +
 +    if (NEED_MUTOT(*inputrec))
 +    {
 +
 +        if (bStateChanged)
 +        {
 +            if (PAR(cr))
 +            {
 +                gmx_sumd(2*DIM,mu,cr);
 +            }
 +            for(i=0; i<2; i++)
 +            {
 +                for(j=0;j<DIM;j++)
 +                {
 +                    fr->mu_tot[i][j] = mu[i*DIM + j];
 +                }
 +            }
 +        }
 +        if (fr->efep == efepNO)
 +        {
 +            copy_rvec(fr->mu_tot[0],mu_tot);
 +        }
 +        else
 +        {
 +            for(j=0; j<DIM; j++)
 +            {
 +                mu_tot[j] =
 +                    (1.0 - lambda[efptCOUL])*fr->mu_tot[0][j] + lambda[efptCOUL]*fr->mu_tot[1][j];
 +            }
 +        }
 +    }
 +
 +    /* Reset energies */
 +    reset_enerdata(&(inputrec->opts),fr,bNS,enerd,MASTER(cr));
 +    clear_rvecs(SHIFTS,fr->fshift);
 +
 +    if (bNS)
 +    {
 +        wallcycle_start(wcycle,ewcNS);
 +
 +        if (graph && bStateChanged)
 +        {
 +            /* Calculate intramolecular shift vectors to make molecules whole */
 +            mk_mshift(fplog,graph,fr->ePBC,box,x);
 +        }
 +
 +        /* Do the actual neighbour searching and if twin range electrostatics
 +         * also do the calculation of long range forces and energies.
 +         */
 +        for (i=0;i<efptNR;i++) {dvdlambda[i] = 0;}
 +        ns(fplog,fr,x,box,
 +           groups,&(inputrec->opts),top,mdatoms,
 +           cr,nrnb,lambda,dvdlambda,&enerd->grpp,bFillGrid,
 +           bDoLongRangeNS);
 +        if (bSepDVDL)
 +        {
 +            fprintf(fplog,sepdvdlformat,"LR non-bonded",0.0,dvdlambda);
 +        }
 +        enerd->dvdl_lin[efptVDW] += dvdlambda[efptVDW];
 +        enerd->dvdl_lin[efptCOUL] += dvdlambda[efptCOUL];
 +
 +        wallcycle_stop(wcycle,ewcNS);
 +    }
 +
 +    if (inputrec->implicit_solvent && bNS)
 +    {
 +        make_gb_nblist(cr,inputrec->gb_algorithm,inputrec->rlist,
 +                       x,box,fr,&top->idef,graph,fr->born);
 +    }
 +
 +    if (DOMAINDECOMP(cr))
 +    {
 +        if (!(cr->duty & DUTY_PME))
 +        {
 +            wallcycle_start(wcycle,ewcPPDURINGPME);
 +            dd_force_flop_start(cr->dd,nrnb);
 +        }
 +    }
 +
 +    if (inputrec->bRot)
 +    {
 +        /* Enforced rotation has its own cycle counter that starts after the collective
 +         * coordinates have been communicated. It is added to ddCyclF to allow
 +         * for proper load-balancing */
 +        wallcycle_start(wcycle,ewcROT);
 +        do_rotation(cr,inputrec,box,x,t,step,wcycle,bNS);
 +        wallcycle_stop(wcycle,ewcROT);
 +    }
 +
 +    /* Start the force cycle counter.
 +     * This counter is stopped in do_forcelow_level.
 +     * No parallel communication should occur while this counter is running,
 +     * since that will interfere with the dynamic load balancing.
 +     */
 +    wallcycle_start(wcycle,ewcFORCE);
 +    
 +    if (bDoForces)
 +    {
 +        /* Reset forces for which the virial is calculated separately:
 +         * PME/Ewald forces if necessary */
 +        if (fr->bF_NoVirSum)
 +        {
 +            if (flags & GMX_FORCE_VIRIAL)
 +            {
 +                fr->f_novirsum = fr->f_novirsum_alloc;
 +                if (fr->bDomDec)
 +                {
 +                    clear_rvecs(fr->f_novirsum_n,fr->f_novirsum);
 +                }
 +                else
 +                {
 +                    clear_rvecs(homenr,fr->f_novirsum+start);
 +                }
 +            }
 +            else
 +            {
 +                /* We are not calculating the pressure so we do not need
 +                 * a separate array for forces that do not contribute
 +                 * to the pressure.
 +                 */
 +                fr->f_novirsum = f;
 +            }
 +        }
 +
 +        /* Clear the short- and long-range forces */
 +        clear_rvecs(fr->natoms_force_constr,f);
 +        if(bSepLRF && do_per_step(step,inputrec->nstcalclr))
 +        {
 +            clear_rvecs(fr->natoms_force_constr,fr->f_twin);
 +        }
 +        
 +        clear_rvec(fr->vir_diag_posres);
 +    }
 +    if (inputrec->ePull == epullCONSTRAINT)
 +    {
 +        clear_pull_forces(inputrec->pull);
 +    }
 +
 +    /* update QMMMrec, if necessary */
 +    if(fr->bQMMM)
 +    {
 +        update_QMMMrec(cr,fr,x,mdatoms,box,top);
 +    }
 +
 +    if ((flags & GMX_FORCE_BONDED) && top->idef.il[F_POSRES].nr > 0)
 +    {
 +        posres_wrapper(fplog,flags,bSepDVDL,inputrec,nrnb,top,box,x,
 +                       f,enerd,lambda,fr);
 +    }
 +
 +    if ((flags & GMX_FORCE_BONDED) && top->idef.il[F_FBPOSRES].nr > 0)
 +    {
 +        /* Flat-bottomed position restraints always require full pbc */
 +        if(!(bStateChanged && bDoAdressWF))
 +        {
 +            set_pbc(&pbc,inputrec->ePBC,box);
 +        }
 +        v = fbposres(top->idef.il[F_FBPOSRES].nr,top->idef.il[F_FBPOSRES].iatoms,
 +                     top->idef.iparams_fbposres,
 +                     (const rvec*)x,fr->f_novirsum,fr->vir_diag_posres,
 +                     inputrec->ePBC==epbcNONE ? NULL : &pbc,
 +                     fr->rc_scaling,fr->ePBC,fr->posres_com);
 +        enerd->term[F_FBPOSRES] += v;
 +        inc_nrnb(nrnb,eNR_FBPOSRES,top->idef.il[F_FBPOSRES].nr/2);
 +    }
 +
 +    /* Compute the bonded and non-bonded energies and optionally forces */
 +    do_force_lowlevel(fplog,step,fr,inputrec,&(top->idef),
 +                      cr,nrnb,wcycle,mdatoms,&(inputrec->opts),
 +                      x,hist,f, bSepLRF ? fr->f_twin : f,enerd,fcd,mtop,top,fr->born,
 +                      &(top->atomtypes),bBornRadii,box,
 +                      inputrec->fepvals,lambda,
 +                      graph,&(top->excls),fr->mu_tot,
 +                      flags,
 +                      &cycles_pme);
 +
 +    if(bSepLRF)
 +    {
 +        if (do_per_step(step,inputrec->nstcalclr))
 +        {
 +            /* Add the long range forces to the short range forces */
 +            for(i=0; i<fr->natoms_force_constr; i++)
 +            {
 +                rvec_add(fr->f_twin[i],f[i],f[i]);
 +            }
 +        }
 +    }
 +    
 +    cycles_force = wallcycle_stop(wcycle,ewcFORCE);
 +
 +    if (ed)
 +    {
++        do_flood(cr,inputrec,x,f,ed,box,step,bNS);
 +    }
 +
 +    if (DOMAINDECOMP(cr))
 +    {
 +        dd_force_flop_stop(cr->dd,nrnb);
 +        if (wcycle)
 +        {
 +            dd_cycles_add(cr->dd,cycles_force-cycles_pme,ddCyclF);
 +        }
 +    }
 +
 +    if (bDoForces)
 +    {
 +        if (IR_ELEC_FIELD(*inputrec))
 +        {
 +            /* Compute forces due to electric field */
 +            calc_f_el(MASTER(cr) ? field : NULL,
 +                      start,homenr,mdatoms->chargeA,x,fr->f_novirsum,
 +                      inputrec->ex,inputrec->et,t);
 +        }
 +
 +        if (bDoAdressWF && fr->adress_icor == eAdressICThermoForce)
 +        {
 +            /* Compute thermodynamic force in hybrid AdResS region */
 +            adress_thermo_force(start,homenr,&(top->cgs),x,fr->f_novirsum,fr,mdatoms,
 +                                inputrec->ePBC==epbcNONE ? NULL : &pbc);
 +        }
 +
 +        /* Communicate the forces */
 +        if (PAR(cr))
 +        {
 +            wallcycle_start(wcycle,ewcMOVEF);
 +            if (DOMAINDECOMP(cr))
 +            {
 +                dd_move_f(cr->dd,f,fr->fshift);
 +                /* Do we need to communicate the separate force array
 +                 * for terms that do not contribute to the single sum virial?
 +                 * Position restraints and electric fields do not introduce
 +                 * inter-cg forces, only full electrostatics methods do.
 +                 * When we do not calculate the virial, fr->f_novirsum = f,
 +                 * so we have already communicated these forces.
 +                 */
 +                if (EEL_FULL(fr->eeltype) && cr->dd->n_intercg_excl &&
 +                    (flags & GMX_FORCE_VIRIAL))
 +                {
 +                    dd_move_f(cr->dd,fr->f_novirsum,NULL);
 +                }
 +                if (bSepLRF)
 +                {
 +                    /* We should not update the shift forces here,
 +                     * since f_twin is already included in f.
 +                     */
 +                    dd_move_f(cr->dd,fr->f_twin,NULL);
 +                }
 +            }
 +            else
 +            {
 +                pd_move_f(cr,f,nrnb);
 +                if (bSepLRF)
 +                {
 +                    pd_move_f(cr,fr->f_twin,nrnb);
 +                }
 +            }
 +            wallcycle_stop(wcycle,ewcMOVEF);
 +        }
 +
 +        /* If we have NoVirSum forces, but we do not calculate the virial,
 +         * we sum fr->f_novirum=f later.
 +         */
 +        if (vsite && !(fr->bF_NoVirSum && !(flags & GMX_FORCE_VIRIAL)))
 +        {
 +            wallcycle_start(wcycle,ewcVSITESPREAD);
 +            spread_vsite_f(fplog,vsite,x,f,fr->fshift,FALSE,NULL,nrnb,
 +                           &top->idef,fr->ePBC,fr->bMolPBC,graph,box,cr);
 +            wallcycle_stop(wcycle,ewcVSITESPREAD);
 +
 +            if (bSepLRF)
 +            {
 +                wallcycle_start(wcycle,ewcVSITESPREAD);
 +                spread_vsite_f(fplog,vsite,x,fr->f_twin,NULL,FALSE,NULL,
 +                               nrnb,
 +                               &top->idef,fr->ePBC,fr->bMolPBC,graph,box,cr);
 +                wallcycle_stop(wcycle,ewcVSITESPREAD);
 +            }
 +        }
 +
 +        if (flags & GMX_FORCE_VIRIAL)
 +        {
 +            /* Calculation of the virial must be done after vsites! */
 +            calc_virial(fplog,mdatoms->start,mdatoms->homenr,x,f,
 +                        vir_force,graph,box,nrnb,fr,inputrec->ePBC);
 +        }
 +    }
 +
 +    if (inputrec->ePull == epullUMBRELLA || inputrec->ePull == epullCONST_F)
 +    {
 +        pull_potential_wrapper(fplog,bSepDVDL,cr,inputrec,box,x,
 +                               f,vir_force,mdatoms,enerd,lambda,t);
 +    }
 +
 +    /* Add the forces from enforced rotation potentials (if any) */
 +    if (inputrec->bRot)
 +    {
 +        wallcycle_start(wcycle,ewcROTadd);
 +        enerd->term[F_COM_PULL] += add_rot_forces(inputrec->rot, f, cr,step,t);
 +        wallcycle_stop(wcycle,ewcROTadd);
 +    }
 +
 +    if (PAR(cr) && !(cr->duty & DUTY_PME))
 +    {
 +        /* In case of node-splitting, the PP nodes receive the long-range 
 +         * forces, virial and energy from the PME nodes here.
 +         */
 +        pme_receive_force_ener(fplog,bSepDVDL,cr,wcycle,enerd,fr);
 +    }
 +
 +    if (bDoForces)
 +    {
 +        post_process_forces(fplog,cr,step,nrnb,wcycle,
 +                            top,box,x,f,vir_force,mdatoms,graph,fr,vsite,
 +                            flags);
 +    }
 +
 +    /* Sum the potential energy terms from group contributions */
 +    sum_epot(&(inputrec->opts),&(enerd->grpp),enerd->term);
 +}
 +
 +void do_force(FILE *fplog,t_commrec *cr,
 +              t_inputrec *inputrec,
 +              gmx_large_int_t step,t_nrnb *nrnb,gmx_wallcycle_t wcycle,
 +              gmx_localtop_t *top,
 +              gmx_mtop_t *mtop,
 +              gmx_groups_t *groups,
 +              matrix box,rvec x[],history_t *hist,
 +              rvec f[],
 +              tensor vir_force,
 +              t_mdatoms *mdatoms,
 +              gmx_enerdata_t *enerd,t_fcdata *fcd,
 +              real *lambda,t_graph *graph,
 +              t_forcerec *fr,
 +              gmx_vsite_t *vsite,rvec mu_tot,
 +              double t,FILE *field,gmx_edsam_t ed,
 +              gmx_bool bBornRadii,
 +              int flags)
 +{
 +    /* modify force flag if not doing nonbonded */
 +    if (!fr->bNonbonded)
 +    {
 +        flags &= ~GMX_FORCE_NONBONDED;
 +    }
 +
 +    switch (inputrec->cutoff_scheme)
 +    {
 +        case ecutsVERLET:
 +            do_force_cutsVERLET(fplog, cr, inputrec,
 +                                step, nrnb, wcycle,
 +                                top, mtop,
 +                                groups,
 +                                box, x, hist,
 +                                f, vir_force,
 +                                mdatoms,
 +                                enerd, fcd,
 +                                lambda, graph,
 +                                fr, fr->ic, 
 +                                vsite, mu_tot,
 +                                t, field, ed,
 +                                bBornRadii,
 +                                flags);
 +            break;
 +        case ecutsGROUP:
 +             do_force_cutsGROUP(fplog, cr, inputrec,
 +                                step, nrnb, wcycle,
 +                                top, mtop,
 +                                groups,
 +                                box, x, hist,
 +                                f, vir_force,
 +                                mdatoms,
 +                                enerd, fcd,
 +                                lambda, graph,
 +                                fr, vsite, mu_tot,
 +                                t, field, ed,
 +                                bBornRadii,
 +                                flags);
 +            break;
 +        default:
 +            gmx_incons("Invalid cut-off scheme passed!");
 +    }
 +}
 +
 +
 +void do_constrain_first(FILE *fplog,gmx_constr_t constr,
 +                        t_inputrec *ir,t_mdatoms *md,
 +                        t_state *state,rvec *f,
 +                        t_graph *graph,t_commrec *cr,t_nrnb *nrnb,
 +                        t_forcerec *fr, gmx_localtop_t *top, tensor shake_vir)
 +{
 +    int    i,m,start,end;
 +    gmx_large_int_t step;
 +    real   dt=ir->delta_t;
 +    real   dvdl_dum;
 +    rvec   *savex;
 +
 +    snew(savex,state->natoms);
 +
 +    start = md->start;
 +    end   = md->homenr + start;
 +
 +    if (debug)
 +        fprintf(debug,"vcm: start=%d, homenr=%d, end=%d\n",
 +                start,md->homenr,end);
 +    /* Do a first constrain to reset particles... */
 +    step = ir->init_step;
 +    if (fplog)
 +    {
 +        char buf[STEPSTRSIZE];
 +        fprintf(fplog,"\nConstraining the starting coordinates (step %s)\n",
 +                gmx_step_str(step,buf));
 +    }
 +    dvdl_dum = 0;
 +
 +    /* constrain the current position */
 +    constrain(NULL,TRUE,FALSE,constr,&(top->idef),
 +              ir,NULL,cr,step,0,md,
 +              state->x,state->x,NULL,
 +              fr->bMolPBC,state->box,
 +              state->lambda[efptBONDED],&dvdl_dum,
 +              NULL,NULL,nrnb,econqCoord,
 +              ir->epc==epcMTTK,state->veta,state->veta);
 +    if (EI_VV(ir->eI))
 +    {
 +        /* constrain the inital velocity, and save it */
 +        /* also may be useful if we need the ekin from the halfstep for velocity verlet */
 +        /* might not yet treat veta correctly */
 +        constrain(NULL,TRUE,FALSE,constr,&(top->idef),
 +                  ir,NULL,cr,step,0,md,
 +                  state->x,state->v,state->v,
 +                  fr->bMolPBC,state->box,
 +                  state->lambda[efptBONDED],&dvdl_dum,
 +                  NULL,NULL,nrnb,econqVeloc,
 +                  ir->epc==epcMTTK,state->veta,state->veta);
 +    }
 +    /* constrain the inital velocities at t-dt/2 */
 +    if (EI_STATE_VELOCITY(ir->eI) && ir->eI!=eiVV)
 +    {
 +        for(i=start; (i<end); i++)
 +        {
 +            for(m=0; (m<DIM); m++)
 +            {
 +                /* Reverse the velocity */
 +                state->v[i][m] = -state->v[i][m];
 +                /* Store the position at t-dt in buf */
 +                savex[i][m] = state->x[i][m] + dt*state->v[i][m];
 +            }
 +        }
 +    /* Shake the positions at t=-dt with the positions at t=0
 +     * as reference coordinates.
 +         */
 +        if (fplog)
 +        {
 +            char buf[STEPSTRSIZE];
 +            fprintf(fplog,"\nConstraining the coordinates at t0-dt (step %s)\n",
 +                    gmx_step_str(step,buf));
 +        }
 +        dvdl_dum = 0;
 +        constrain(NULL,TRUE,FALSE,constr,&(top->idef),
 +                  ir,NULL,cr,step,-1,md,
 +                  state->x,savex,NULL,
 +                  fr->bMolPBC,state->box,
 +                  state->lambda[efptBONDED],&dvdl_dum,
 +                  state->v,NULL,nrnb,econqCoord,
 +                  ir->epc==epcMTTK,state->veta,state->veta);
 +        
 +        for(i=start; i<end; i++) {
 +            for(m=0; m<DIM; m++) {
 +                /* Re-reverse the velocities */
 +                state->v[i][m] = -state->v[i][m];
 +            }
 +        }
 +    }
 +    sfree(savex);
 +}
 +
 +void calc_enervirdiff(FILE *fplog,int eDispCorr,t_forcerec *fr)
 +{
 +  double eners[2],virs[2],enersum,virsum,y0,f,g,h;
 +  double r0,r1,r,rc3,rc9,ea,eb,ec,pa,pb,pc,pd;
 +  double invscale,invscale2,invscale3;
 +  int    ri0,ri1,ri,i,offstart,offset;
 +  real   scale,*vdwtab,tabfactor,tmp;
 +
 +  fr->enershiftsix = 0;
 +  fr->enershifttwelve = 0;
 +  fr->enerdiffsix = 0;
 +  fr->enerdifftwelve = 0;
 +  fr->virdiffsix = 0;
 +  fr->virdifftwelve = 0;
 +
 +  if (eDispCorr != edispcNO) {
 +    for(i=0; i<2; i++) {
 +      eners[i] = 0;
 +      virs[i]  = 0;
 +    }
 +    if ((fr->vdwtype == evdwSWITCH) || (fr->vdwtype == evdwSHIFT)) {
 +      if (fr->rvdw_switch == 0)
 +      gmx_fatal(FARGS,
 +                "With dispersion correction rvdw-switch can not be zero "
 +                "for vdw-type = %s",evdw_names[fr->vdwtype]);
 +
 +      scale  = fr->nblists[0].table_elec_vdw.scale;
 +      vdwtab = fr->nblists[0].table_vdw.data;
 +
 +      /* Round the cut-offs to exact table values for precision */
 +      ri0 = floor(fr->rvdw_switch*scale);
 +      ri1 = ceil(fr->rvdw*scale);
 +      r0  = ri0/scale;
 +      r1  = ri1/scale;
 +      rc3 = r0*r0*r0;
 +      rc9  = rc3*rc3*rc3;
 +
 +      if (fr->vdwtype == evdwSHIFT)
 +      {
 +          /* Determine the constant energy shift below rvdw_switch.
 +           * Table has a scale factor since we have scaled it down to compensate
 +           * for scaling-up c6/c12 with the derivative factors to save flops in analytical kernels.
 +           */
 +          fr->enershiftsix    = (real)(-1.0/(rc3*rc3)) - 6.0*vdwtab[8*ri0];
 +          fr->enershifttwelve = (real)( 1.0/(rc9*rc3)) - 12.0*vdwtab[8*ri0 + 4];
 +      }
 +      /* Add the constant part from 0 to rvdw_switch.
 +       * This integration from 0 to rvdw_switch overcounts the number
 +       * of interactions by 1, as it also counts the self interaction.
 +       * We will correct for this later.
 +       */
 +      eners[0] += 4.0*M_PI*fr->enershiftsix*rc3/3.0;
 +      eners[1] += 4.0*M_PI*fr->enershifttwelve*rc3/3.0;
 +
 +      invscale = 1.0/(scale);
 +      invscale2 = invscale*invscale;
 +      invscale3 = invscale*invscale2;
 +
 +      /* following summation derived from cubic spline definition,
 +      Numerical Recipies in C, second edition, p. 113-116.  Exact
 +      for the cubic spline.  We first calculate the negative of
 +      the energy from rvdw to rvdw_switch, assuming that g(r)=1,
 +      and then add the more standard, abrupt cutoff correction to
 +      that result, yielding the long-range correction for a
 +      switched function.  We perform both the pressure and energy
 +      loops at the same time for simplicity, as the computational
 +      cost is low. */
 +
 +      for (i=0;i<2;i++) {
 +        enersum = 0.0; virsum = 0.0;
 +        if (i==0)
 +        {
 +            offstart = 0;
 +            /* Since the dispersion table has been scaled down a factor 6.0 and the repulsion
 +             * a factor 12.0 to compensate for the c6/c12 parameters inside nbfp[] being scaled
 +             * up (to save flops in kernels), we need to correct for this.
 +             */
 +            tabfactor = 6.0;
 +        }
 +        else
 +        {
 +            offstart = 4;
 +            tabfactor = 12.0;
 +        }
 +      for (ri=ri0; ri<ri1; ri++) {
 +          r = ri*invscale;
 +          ea = invscale3;
 +          eb = 2.0*invscale2*r;
 +          ec = invscale*r*r;
 +
 +          pa = invscale3;
 +          pb = 3.0*invscale2*r;
 +          pc = 3.0*invscale*r*r;
 +          pd = r*r*r;
 +
 +          /* this "8" is from the packing in the vdwtab array - perhaps should be #define'ed? */
 +          offset = 8*ri + offstart;
 +          y0 = vdwtab[offset];
 +          f  = vdwtab[offset+1];
 +          g  = vdwtab[offset+2];
 +          h  = vdwtab[offset+3];
 +
 +          enersum += y0*(ea/3 + eb/2 + ec) + f*(ea/4 + eb/3 + ec/2) + g*(ea/5 + eb/4 + ec/3) + h*(ea/6 + eb/5 + ec/4);
 +          virsum  += f*(pa/4 + pb/3 + pc/2 + pd) + 2*g*(pa/5 + pb/4 + pc/3 + pd/2) + 3*h*(pa/6 + pb/5 + pc/4 + pd/3);
 +        }
 +          
 +        enersum *= 4.0*M_PI*tabfactor;
 +        virsum  *= 4.0*M_PI*tabfactor;
 +        eners[i] -= enersum;
 +        virs[i]  -= virsum;
 +      }
 +
 +      /* now add the correction for rvdw_switch to infinity */
 +      eners[0] += -4.0*M_PI/(3.0*rc3);
 +      eners[1] +=  4.0*M_PI/(9.0*rc9);
 +      virs[0]  +=  8.0*M_PI/rc3;
 +      virs[1]  += -16.0*M_PI/(3.0*rc9);
 +    }
 +    else if ((fr->vdwtype == evdwCUT) || (fr->vdwtype == evdwUSER)) {
 +      if (fr->vdwtype == evdwUSER && fplog)
 +      fprintf(fplog,
 +              "WARNING: using dispersion correction with user tables\n");
 +      rc3  = fr->rvdw*fr->rvdw*fr->rvdw;
 +      rc9  = rc3*rc3*rc3;
 +      /* Contribution beyond the cut-off */
 +      eners[0] += -4.0*M_PI/(3.0*rc3);
 +      eners[1] +=  4.0*M_PI/(9.0*rc9);
 +      if (fr->vdw_modifier==eintmodPOTSHIFT) {
 +          /* Contribution within the cut-off */
 +          eners[0] += -4.0*M_PI/(3.0*rc3);
 +          eners[1] +=  4.0*M_PI/(3.0*rc9);
 +      }
 +      /* Contribution beyond the cut-off */
 +      virs[0]  +=  8.0*M_PI/rc3;
 +      virs[1]  += -16.0*M_PI/(3.0*rc9);
 +    } else {
 +      gmx_fatal(FARGS,
 +              "Dispersion correction is not implemented for vdw-type = %s",
 +              evdw_names[fr->vdwtype]);
 +    }
 +    fr->enerdiffsix    = eners[0];
 +    fr->enerdifftwelve = eners[1];
 +    /* The 0.5 is due to the Gromacs definition of the virial */
 +    fr->virdiffsix     = 0.5*virs[0];
 +    fr->virdifftwelve  = 0.5*virs[1];
 +  }
 +}
 +
 +void calc_dispcorr(FILE *fplog,t_inputrec *ir,t_forcerec *fr,
 +                   gmx_large_int_t step,int natoms,
 +                   matrix box,real lambda,tensor pres,tensor virial,
 +                   real *prescorr, real *enercorr, real *dvdlcorr)
 +{
 +    gmx_bool bCorrAll,bCorrPres;
 +    real dvdlambda,invvol,dens,ninter,avcsix,avctwelve,enerdiff,svir=0,spres=0;
 +    int  m;
 +
 +    *prescorr = 0;
 +    *enercorr = 0;
 +    *dvdlcorr = 0;
 +
 +    clear_mat(virial);
 +    clear_mat(pres);
 +
 +    if (ir->eDispCorr != edispcNO) {
 +        bCorrAll  = (ir->eDispCorr == edispcAllEner ||
 +                     ir->eDispCorr == edispcAllEnerPres);
 +        bCorrPres = (ir->eDispCorr == edispcEnerPres ||
 +                     ir->eDispCorr == edispcAllEnerPres);
 +
 +        invvol = 1/det(box);
 +        if (fr->n_tpi)
 +        {
 +            /* Only correct for the interactions with the inserted molecule */
 +            dens = (natoms - fr->n_tpi)*invvol;
 +            ninter = fr->n_tpi;
 +        }
 +        else
 +        {
 +            dens = natoms*invvol;
 +            ninter = 0.5*natoms;
 +        }
 +
 +        if (ir->efep == efepNO)
 +        {
 +            avcsix    = fr->avcsix[0];
 +            avctwelve = fr->avctwelve[0];
 +        }
 +        else
 +        {
 +            avcsix    = (1 - lambda)*fr->avcsix[0]    + lambda*fr->avcsix[1];
 +            avctwelve = (1 - lambda)*fr->avctwelve[0] + lambda*fr->avctwelve[1];
 +        }
 +
 +        enerdiff = ninter*(dens*fr->enerdiffsix - fr->enershiftsix);
 +        *enercorr += avcsix*enerdiff;
 +        dvdlambda = 0.0;
 +        if (ir->efep != efepNO)
 +        {
 +            dvdlambda += (fr->avcsix[1] - fr->avcsix[0])*enerdiff;
 +        }
 +        if (bCorrAll)
 +        {
 +            enerdiff = ninter*(dens*fr->enerdifftwelve - fr->enershifttwelve);
 +            *enercorr += avctwelve*enerdiff;
 +            if (fr->efep != efepNO)
 +            {
 +                dvdlambda += (fr->avctwelve[1] - fr->avctwelve[0])*enerdiff;
 +            }
 +        }
 +
 +        if (bCorrPres)
 +        {
 +            svir = ninter*dens*avcsix*fr->virdiffsix/3.0;
 +            if (ir->eDispCorr == edispcAllEnerPres)
 +            {
 +                svir += ninter*dens*avctwelve*fr->virdifftwelve/3.0;
 +            }
 +            /* The factor 2 is because of the Gromacs virial definition */
 +            spres = -2.0*invvol*svir*PRESFAC;
 +
 +            for(m=0; m<DIM; m++) {
 +                virial[m][m] += svir;
 +                pres[m][m] += spres;
 +            }
 +            *prescorr += spres;
 +        }
 +
 +        /* Can't currently control when it prints, for now, just print when debugging */
 +        if (debug)
 +        {
 +            if (bCorrAll) {
 +                fprintf(debug,"Long Range LJ corr.: <C6> %10.4e, <C12> %10.4e\n",
 +                        avcsix,avctwelve);
 +            }
 +            if (bCorrPres)
 +            {
 +                fprintf(debug,
 +                        "Long Range LJ corr.: Epot %10g, Pres: %10g, Vir: %10g\n",
 +                        *enercorr,spres,svir);
 +            }
 +            else
 +            {
 +                fprintf(debug,"Long Range LJ corr.: Epot %10g\n",*enercorr);
 +            }
 +        }
 +
 +        if (fr->bSepDVDL && do_per_step(step,ir->nstlog))
 +        {
 +            fprintf(fplog,sepdvdlformat,"Dispersion correction",
 +                    *enercorr,dvdlambda);
 +        }
 +        if (fr->efep != efepNO)
 +        {
 +            *dvdlcorr += dvdlambda;
 +        }
 +    }
 +}
 +
 +void do_pbc_first(FILE *fplog,matrix box,t_forcerec *fr,
 +                t_graph *graph,rvec x[])
 +{
 +  if (fplog)
 +    fprintf(fplog,"Removing pbc first time\n");
 +  calc_shifts(box,fr->shift_vec);
 +  if (graph) {
 +    mk_mshift(fplog,graph,fr->ePBC,box,x);
 +    if (gmx_debug_at)
 +      p_graph(debug,"do_pbc_first 1",graph);
 +    shift_self(graph,box,x);
 +    /* By doing an extra mk_mshift the molecules that are broken
 +     * because they were e.g. imported from another software
 +     * will be made whole again. Such are the healing powers
 +     * of GROMACS.
 +     */
 +    mk_mshift(fplog,graph,fr->ePBC,box,x);
 +    if (gmx_debug_at)
 +      p_graph(debug,"do_pbc_first 2",graph);
 +  }
 +  if (fplog)
 +    fprintf(fplog,"Done rmpbc\n");
 +}
 +
 +static void low_do_pbc_mtop(FILE *fplog,int ePBC,matrix box,
 +                          gmx_mtop_t *mtop,rvec x[],
 +                          gmx_bool bFirst)
 +{
 +  t_graph *graph;
 +  int mb,as,mol;
 +  gmx_molblock_t *molb;
 +
 +  if (bFirst && fplog)
 +    fprintf(fplog,"Removing pbc first time\n");
 +
 +  snew(graph,1);
 +  as = 0;
 +  for(mb=0; mb<mtop->nmolblock; mb++) {
 +    molb = &mtop->molblock[mb];
 +    if (molb->natoms_mol == 1 ||
 +      (!bFirst && mtop->moltype[molb->type].cgs.nr == 1)) {
 +      /* Just one atom or charge group in the molecule, no PBC required */
 +      as += molb->nmol*molb->natoms_mol;
 +    } else {
 +      /* Pass NULL instead of fplog to avoid graph prints for each molecule type */
 +      mk_graph_ilist(NULL,mtop->moltype[molb->type].ilist,
 +                   0,molb->natoms_mol,FALSE,FALSE,graph);
 +
 +      for(mol=0; mol<molb->nmol; mol++) {
 +      mk_mshift(fplog,graph,ePBC,box,x+as);
 +
 +      shift_self(graph,box,x+as);
 +      /* The molecule is whole now.
 +       * We don't need the second mk_mshift call as in do_pbc_first,
 +       * since we no longer need this graph.
 +       */
 +
 +      as += molb->natoms_mol;
 +      }
 +      done_graph(graph);
 +    }
 +  }
 +  sfree(graph);
 +}
 +
 +void do_pbc_first_mtop(FILE *fplog,int ePBC,matrix box,
 +                     gmx_mtop_t *mtop,rvec x[])
 +{
 +  low_do_pbc_mtop(fplog,ePBC,box,mtop,x,TRUE);
 +}
 +
 +void do_pbc_mtop(FILE *fplog,int ePBC,matrix box,
 +               gmx_mtop_t *mtop,rvec x[])
 +{
 +  low_do_pbc_mtop(fplog,ePBC,box,mtop,x,FALSE);
 +}
 +
 +void finish_run(FILE *fplog,t_commrec *cr,const char *confout,
 +                t_inputrec *inputrec,
 +                t_nrnb nrnb[],gmx_wallcycle_t wcycle,
 +                gmx_runtime_t *runtime,
 +                wallclock_gpu_t *gputimes,
 +                int omp_nth_pp,
 +                gmx_bool bWriteStat)
 +{
 +    int    i,j;
 +    t_nrnb *nrnb_tot=NULL;
 +    real   delta_t;
 +    double nbfs,mflop;
 +
 +    wallcycle_sum(cr,wcycle);
 +
 +    if (cr->nnodes > 1)
 +    {
 +        snew(nrnb_tot,1);
 +#ifdef GMX_MPI
 +        MPI_Allreduce(nrnb->n,nrnb_tot->n,eNRNB,MPI_DOUBLE,MPI_SUM,
 +                      cr->mpi_comm_mysim);
 +#endif
 +    }
 +    else
 +    {
 +        nrnb_tot = nrnb;
 +    }
 +
 +#if defined(GMX_MPI) && !defined(GMX_THREAD_MPI)
 +    if (cr->nnodes > 1)
 +    {
 +        /* reduce nodetime over all MPI processes in the current simulation */
 +        double sum;
 +        MPI_Allreduce(&runtime->proctime,&sum,1,MPI_DOUBLE,MPI_SUM,
 +                      cr->mpi_comm_mysim);
 +        runtime->proctime = sum;
 +    }
 +#endif
 +
 +    if (SIMMASTER(cr))
 +    {
 +        print_flop(fplog,nrnb_tot,&nbfs,&mflop);
 +    }
 +    if (cr->nnodes > 1)
 +    {
 +        sfree(nrnb_tot);
 +    }
 +
 +    if ((cr->duty & DUTY_PP) && DOMAINDECOMP(cr))
 +    {
 +        print_dd_statistics(cr,inputrec,fplog);
 +    }
 +
 +#ifdef GMX_MPI
 +    if (PARTDECOMP(cr))
 +    {
 +        if (MASTER(cr))
 +        {
 +            t_nrnb     *nrnb_all;
 +            int        s;
 +            MPI_Status stat;
 +
 +            snew(nrnb_all,cr->nnodes);
 +            nrnb_all[0] = *nrnb;
 +            for(s=1; s<cr->nnodes; s++)
 +            {
 +                MPI_Recv(nrnb_all[s].n,eNRNB,MPI_DOUBLE,s,0,
 +                         cr->mpi_comm_mysim,&stat);
 +            }
 +            pr_load(fplog,cr,nrnb_all);
 +            sfree(nrnb_all);
 +        }
 +        else
 +        {
 +            MPI_Send(nrnb->n,eNRNB,MPI_DOUBLE,MASTERRANK(cr),0,
 +                     cr->mpi_comm_mysim);
 +        }
 +    }
 +#endif
 +
 +    if (SIMMASTER(cr))
 +    {
 +        wallcycle_print(fplog,cr->nnodes,cr->npmenodes,runtime->realtime,
 +                        wcycle,gputimes);
 +
 +        if (EI_DYNAMICS(inputrec->eI))
 +        {
 +            delta_t = inputrec->delta_t;
 +        }
 +        else
 +        {
 +            delta_t = 0;
 +        }
 +
 +        if (fplog)
 +        {
 +            print_perf(fplog,runtime->proctime,runtime->realtime,
 +                       cr->nnodes-cr->npmenodes,
 +                       runtime->nsteps_done,delta_t,nbfs,mflop,
 +                       omp_nth_pp);
 +        }
 +        if (bWriteStat)
 +        {
 +            print_perf(stderr,runtime->proctime,runtime->realtime,
 +                       cr->nnodes-cr->npmenodes,
 +                       runtime->nsteps_done,delta_t,nbfs,mflop,
 +                       omp_nth_pp);
 +        }
 +    }
 +}
 +
 +extern void initialize_lambdas(FILE *fplog,t_inputrec *ir,int *fep_state,real *lambda,double *lam0)
 +{
 +    /* this function works, but could probably use a logic rewrite to keep all the different
 +       types of efep straight. */
 +
 +    int i;
 +    t_lambda *fep = ir->fepvals;
 +
 +    if ((ir->efep==efepNO) && (ir->bSimTemp == FALSE)) {
 +        for (i=0;i<efptNR;i++)  {
 +            lambda[i] = 0.0;
 +            if (lam0)
 +            {
 +                lam0[i] = 0.0;
 +            }
 +        }
 +        return;
 +    } else {
 +        *fep_state = fep->init_fep_state; /* this might overwrite the checkpoint
 +                                             if checkpoint is set -- a kludge is in for now
 +                                             to prevent this.*/
 +        for (i=0;i<efptNR;i++)
 +        {
 +            /* overwrite lambda state with init_lambda for now for backwards compatibility */
 +            if (fep->init_lambda>=0) /* if it's -1, it was never initialized */
 +            {
 +                lambda[i] = fep->init_lambda;
 +                if (lam0) {
 +                    lam0[i] = lambda[i];
 +                }
 +            }
 +            else
 +            {
 +                lambda[i] = fep->all_lambda[i][*fep_state];
 +                if (lam0) {
 +                    lam0[i] = lambda[i];
 +                }
 +            }
 +        }
 +        if (ir->bSimTemp) {
 +            /* need to rescale control temperatures to match current state */
 +            for (i=0;i<ir->opts.ngtc;i++) {
 +                if (ir->opts.ref_t[i] > 0) {
 +                    ir->opts.ref_t[i] = ir->simtempvals->temperatures[*fep_state];
 +                }
 +            }
 +        }
 +    }
 +
 +    /* Send to the log the information on the current lambdas */
 +    if (fplog != NULL)
 +    {
 +        fprintf(fplog,"Initial vector of lambda components:[ ");
 +        for (i=0;i<efptNR;i++)
 +        {
 +            fprintf(fplog,"%10.4f ",lambda[i]);
 +        }
 +        fprintf(fplog,"]\n");
 +    }
 +    return;
 +}
 +
 +
 +void init_md(FILE *fplog,
 +             t_commrec *cr,t_inputrec *ir,const output_env_t oenv,
 +             double *t,double *t0,
 +             real *lambda, int *fep_state, double *lam0,
 +             t_nrnb *nrnb,gmx_mtop_t *mtop,
 +             gmx_update_t *upd,
 +             int nfile,const t_filenm fnm[],
 +             gmx_mdoutf_t **outf,t_mdebin **mdebin,
 +             tensor force_vir,tensor shake_vir,rvec mu_tot,
 +             gmx_bool *bSimAnn,t_vcm **vcm, t_state *state, unsigned long Flags)
 +{
 +    int  i,j,n;
 +    real tmpt,mod;
 +
 +    /* Initial values */
 +    *t = *t0       = ir->init_t;
 +
 +    *bSimAnn=FALSE;
 +    for(i=0;i<ir->opts.ngtc;i++)
 +    {
 +        /* set bSimAnn if any group is being annealed */
 +        if(ir->opts.annealing[i]!=eannNO)
 +        {
 +            *bSimAnn = TRUE;
 +        }
 +    }
 +    if (*bSimAnn)
 +    {
 +        update_annealing_target_temp(&(ir->opts),ir->init_t);
 +    }
 +
 +    /* Initialize lambda variables */
 +    initialize_lambdas(fplog,ir,fep_state,lambda,lam0);
 +
 +    if (upd)
 +    {
 +        *upd = init_update(fplog,ir);
 +    }
 +
 +
 +    if (vcm != NULL)
 +    {
 +        *vcm = init_vcm(fplog,&mtop->groups,ir);
 +    }
 +
 +    if (EI_DYNAMICS(ir->eI) && !(Flags & MD_APPENDFILES))
 +    {
 +        if (ir->etc == etcBERENDSEN)
 +        {
 +            please_cite(fplog,"Berendsen84a");
 +        }
 +        if (ir->etc == etcVRESCALE)
 +        {
 +            please_cite(fplog,"Bussi2007a");
 +        }
 +    }
 +
 +    init_nrnb(nrnb);
 +
 +    if (nfile != -1)
 +    {
 +        *outf = init_mdoutf(nfile,fnm,Flags,cr,ir,oenv);
 +
 +        *mdebin = init_mdebin((Flags & MD_APPENDFILES) ? NULL : (*outf)->fp_ene,
 +                              mtop,ir, (*outf)->fp_dhdl);
 +    }
 +
 +    if (ir->bAdress)
 +    {
 +      please_cite(fplog,"Fritsch12");
 +      please_cite(fplog,"Junghans10");
 +    }
 +    /* Initiate variables */
 +    clear_mat(force_vir);
 +    clear_mat(shake_vir);
 +    clear_rvec(mu_tot);
 +
 +    debug_gmx();
 +}
 +
index 24044fb31f3b2dfbba97b66343207bf808f39422,0000000000000000000000000000000000000000..47dc8c6a369085425ede79462b91a5e3743bcb23
mode 100644,000000..100644
--- /dev/null
@@@ -1,1428 -1,0 +1,1428 @@@
-   snew_aligned(table.data, 12*(nx+1)*sizeof(real),16);
 +/* -*- mode: c; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4; c-file-style: "stroustrup"; -*-
 + *
 + * 
 + *                This source code is part of
 + * 
 + *                 G   R   O   M   A   C   S
 + * 
 + *          GROningen MAchine for Chemical Simulations
 + * 
 + *                        VERSION 3.2.0
 + * Written by David van der Spoel, Erik Lindahl, Berk Hess, and others.
 + * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 + * Copyright (c) 2001-2004, The GROMACS development team,
 + * check out http://www.gromacs.org for more information.
 +
 + * This program is free software; you can redistribute it and/or
 + * modify it under the terms of the GNU General Public License
 + * as published by the Free Software Foundation; either version 2
 + * of the License, or (at your option) any later version.
 + * 
 + * If you want to redistribute modifications, please consider that
 + * scientific software is very special. Version control is crucial -
 + * bugs must be traceable. We will be happy to consider code for
 + * inclusion in the official distribution, but derived work must not
 + * be called official GROMACS. Details are found in the README & COPYING
 + * files - if they are missing, get the official version at www.gromacs.org.
 + * 
 + * To help us fund GROMACS development, we humbly ask that you cite
 + * the papers on the package - you can find them in the top README file.
 + * 
 + * For more info, check our website at http://www.gromacs.org
 + * 
 + * And Hey:
 + * GROwing Monsters And Cloning Shrimps
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +#include "maths.h"
 +#include "typedefs.h"
 +#include "names.h"
 +#include "smalloc.h"
 +#include "gmx_fatal.h"
 +#include "futil.h"
 +#include "xvgr.h"
 +#include "vec.h"
 +#include "main.h"
 +#include "network.h"
 +#include "physics.h"
 +#include "force.h"
 +#include "gmxfio.h"
 +#include "macros.h"
 +#include "tables.h"
 +
 +/* All the possible (implemented) table functions */
 +enum { 
 +  etabLJ6,   
 +  etabLJ12, 
 +  etabLJ6Shift, 
 +  etabLJ12Shift, 
 +  etabShift,
 +  etabRF,
 +  etabRF_ZERO,
 +  etabCOUL, 
 +  etabEwald, 
 +  etabEwaldSwitch, 
 +  etabEwaldUser,
 +  etabEwaldUserSwitch,
 +  etabLJ6Switch, 
 +  etabLJ12Switch, 
 +  etabCOULSwitch, 
 +  etabLJ6Encad, 
 +  etabLJ12Encad, 
 +  etabCOULEncad,  
 +  etabEXPMIN, 
 +  etabUSER, 
 +  etabNR 
 +};
 +
 +/** Evaluates to true if the table type contains user data. */
 +#define ETAB_USER(e)  ((e) == etabUSER || \
 +                       (e) == etabEwaldUser || (e) == etabEwaldUserSwitch)
 +
 +typedef struct {
 +  const char *name;
 +  gmx_bool bCoulomb;
 +} t_tab_props;
 +
 +/* This structure holds name and a flag that tells whether 
 +   this is a Coulomb type funtion */
 
 +static const t_tab_props tprops[etabNR] = {
 +  { "LJ6",  FALSE },
 +  { "LJ12", FALSE },
 +  { "LJ6Shift", FALSE },
 +  { "LJ12Shift", FALSE },
 +  { "Shift", TRUE },
 +  { "RF", TRUE },
 +  { "RF-zero", TRUE },
 +  { "COUL", TRUE },
 +  { "Ewald", TRUE },
 +  { "Ewald-Switch", TRUE },
 +  { "Ewald-User", TRUE },
 +  { "Ewald-User-Switch", TRUE },
 +  { "LJ6Switch", FALSE },
 +  { "LJ12Switch", FALSE },
 +  { "COULSwitch", TRUE },
 +  { "LJ6-Encad shift", FALSE },
 +  { "LJ12-Encad shift", FALSE },
 +  { "COUL-Encad shift",  TRUE },
 +  { "EXPMIN", FALSE },
 +  { "USER", FALSE }
 +};
 +
 +/* Index in the table that says which function to use */
 +enum { etiCOUL, etiLJ6, etiLJ12, etiNR };
 +
 +typedef struct {
 +  int  nx,nx0;
 +  double tabscale;
 +  double *x,*v,*f;
 +} t_tabledata;
 +
 +#define pow2(x) ((x)*(x))
 +#define pow3(x) ((x)*(x)*(x))
 +#define pow4(x) ((x)*(x)*(x)*(x))
 +#define pow5(x) ((x)*(x)*(x)*(x)*(x))
 +
 +
 +static double v_ewald_lr(double beta,double r)
 +{
 +    if (r == 0)
 +    {
 +        return beta*2/sqrt(M_PI);
 +    }
 +    else
 +    {
 +        return gmx_erfd(beta*r)/r;
 +    }
 +}
 +
 +void table_spline3_fill_ewald_lr(real *table_f,
 +                                 real *table_v,
 +                                 real *table_fdv0,
 +                                 int   ntab,
 +                                 real  dx,
 +                                 real  beta)
 +{
 +    real tab_max;
 +    int i,i_inrange;
 +    double dc,dc_new;
 +    gmx_bool bOutOfRange;
 +    double v_r0,v_r1,v_inrange,vi,a0,a1,a2dx;
 +    double x_r0;
 +
 +    if (ntab < 2)
 +    {
 +        gmx_fatal(FARGS,"Can not make a spline table with less than 2 points");
 +    }
 +
 +    /* We need some margin to be able to divide table values by r
 +     * in the kernel and also to do the integration arithmetics
 +     * without going out of range. Furthermore, we divide by dx below.
 +     */
 +    tab_max = GMX_REAL_MAX*0.0001;
 +
 +    /* This function produces a table with:
 +     * maximum energy error: V'''/(6*12*sqrt(3))*dx^3
 +     * maximum force error:  V'''/(6*4)*dx^2
 +     * The rms force error is the max error times 1/sqrt(5)=0.45.
 +     */
 +
 +    bOutOfRange = FALSE;
 +    i_inrange = ntab;
 +    v_inrange = 0;
 +    dc = 0;
 +    for(i=ntab-1; i>=0; i--)
 +    {
 +        x_r0 = i*dx;
 +
 +        v_r0 = v_ewald_lr(beta,x_r0);
 +
 +        if (!bOutOfRange)
 +        {
 +            i_inrange = i;
 +            v_inrange = v_r0;
 +    
 +            vi = v_r0;
 +        }
 +        else
 +        {
 +            /* Linear continuation for the last point in range */
 +            vi = v_inrange - dc*(i - i_inrange)*dx;
 +        }
 +
 +        if(table_v!=NULL)
 +        {
 +            table_v[i] = vi;
 +        }
 +
 +        if (i == 0)
 +        {
 +            continue;
 +        }
 +
 +        /* Get the potential at table point i-1 */
 +        v_r1 = v_ewald_lr(beta,(i-1)*dx);
 +
 +        if (v_r1 != v_r1 || v_r1 < -tab_max || v_r1 > tab_max)
 +        {
 +            bOutOfRange = TRUE;
 +        }
 +
 +        if (!bOutOfRange)
 +        {
 +            /* Calculate the average second derivative times dx over interval i-1 to i.
 +             * Using the function values at the end points and in the middle.
 +             */
 +            a2dx = (v_r0 + v_r1 - 2*v_ewald_lr(beta,x_r0-0.5*dx))/(0.25*dx);
 +            /* Set the derivative of the spline to match the difference in potential
 +             * over the interval plus the average effect of the quadratic term.
 +             * This is the essential step for minimizing the error in the force.
 +             */
 +            dc = (v_r0 - v_r1)/dx + 0.5*a2dx;
 +        }
 +
 +        if (i == ntab - 1)
 +        {
 +            /* Fill the table with the force, minus the derivative of the spline */
 +            table_f[i] = -dc;
 +        }
 +        else
 +        {
 +            /* tab[i] will contain the average of the splines over the two intervals */
 +            table_f[i] += -0.5*dc;
 +        }
 +
 +        if (!bOutOfRange)
 +        {
 +            /* Make spline s(x) = a0 + a1*(x - xr) + 0.5*a2*(x - xr)^2
 +             * matching the potential at the two end points
 +             * and the derivative dc at the end point xr.
 +             */
 +            a0   = v_r0;
 +            a1   = dc;
 +            a2dx = (a1*dx + v_r1 - a0)*2/dx;
 +
 +            /* Set dc to the derivative at the next point */
 +            dc_new = a1 - a2dx;
 +                
 +            if (dc_new != dc_new || dc_new < -tab_max || dc_new > tab_max)
 +            {
 +                bOutOfRange = TRUE;
 +            }
 +            else
 +            {
 +                dc = dc_new;
 +            }
 +        }
 +
 +        table_f[(i-1)] = -0.5*dc;
 +    }
 +    /* Currently the last value only contains half the force: double it */
 +    table_f[0] *= 2;
 +
 +    if(table_v!=NULL && table_fdv0!=NULL)
 +    {
 +        /* Copy to FDV0 table too. Allocation occurs in forcerec.c,
 +         * init_ewald_f_table().
 +         */
 +        for(i=0;i<ntab-1;i++)
 +        {
 +            table_fdv0[4*i]     = table_f[i];
 +            table_fdv0[4*i+1]   = table_f[i+1]-table_f[i];
 +            table_fdv0[4*i+2]   = table_v[i];
 +            table_fdv0[4*i+3]   = 0.0;
 +        }
 +        table_fdv0[4*(ntab-1)]    = table_f[(ntab-1)];
 +        table_fdv0[4*(ntab-1)+1]  = -table_f[(ntab-1)];
 +        table_fdv0[4*(ntab-1)+2]  = table_v[(ntab-1)];
 +        table_fdv0[4*(ntab-1)+3]  = 0.0;
 +    }
 +}
 +
 +/* The scale (1/spacing) for third order spline interpolation
 + * of the Ewald mesh contribution which needs to be subtracted
 + * from the non-bonded interactions.
 + */
 +real ewald_spline3_table_scale(real ewaldcoeff,real rc)
 +{
 +    double erf_x_d3=1.0522; /* max of (erf(x)/x)''' */
 +    double ftol,etol;
 +    double sc_f,sc_e;
 +
 +    /* Force tolerance: single precision accuracy */
 +    ftol = GMX_FLOAT_EPS;
 +    sc_f = sqrt(erf_x_d3/(6*4*ftol*ewaldcoeff))*ewaldcoeff;
 +
 +    /* Energy tolerance: 10x more accurate than the cut-off jump */
 +    etol = 0.1*gmx_erfc(ewaldcoeff*rc);
 +    etol = max(etol,GMX_REAL_EPS);
 +    sc_e = pow(erf_x_d3/(6*12*sqrt(3)*etol),1.0/3.0)*ewaldcoeff;
 +
 +    return max(sc_f,sc_e);
 +}
 +
 +/* Calculate the potential and force for an r value
 + * in exactly the same way it is done in the inner loop.
 + * VFtab is a pointer to the table data, offset is
 + * the point where we should begin and stride is 
 + * 4 if we have a buckingham table, 3 otherwise.
 + * If you want to evaluate table no N, set offset to 4*N.
 + *  
 + * We use normal precision here, since that is what we
 + * will use in the inner loops.
 + */
 +static void evaluate_table(real VFtab[], int offset, int stride, 
 +                         real tabscale, real r, real *y, real *yp)
 +{
 +  int n;
 +  real rt,eps,eps2;
 +  real Y,F,Geps,Heps2,Fp;
 +
 +  rt       =  r*tabscale;
 +  n        =  (int)rt;
 +  eps      =  rt - n;
 +  eps2     =  eps*eps;
 +  n        =  offset+stride*n;
 +  Y        =  VFtab[n];
 +  F        =  VFtab[n+1];
 +  Geps     =  eps*VFtab[n+2];
 +  Heps2    =  eps2*VFtab[n+3];
 +  Fp       =  F+Geps+Heps2;
 +  *y       =  Y+eps*Fp;
 +  *yp      =  (Fp+Geps+2.0*Heps2)*tabscale;
 +}
 +
 +static void copy2table(int n,int offset,int stride,
 +                     double x[],double Vtab[],double Ftab[],real scalefactor,
 +                     real dest[])
 +{
 +/* Use double prec. for the intermediary variables
 + * and temporary x/vtab/vtab2 data to avoid unnecessary 
 + * loss of precision.
 + */
 +  int  i,nn0;
 +  double F,G,H,h;
 +
 +  h = 0;
 +  for(i=0; (i<n); i++) {
 +    if (i < n-1) {
 +      h   = x[i+1] - x[i];
 +      F   = -Ftab[i]*h;
 +      G   =  3*(Vtab[i+1] - Vtab[i]) + (Ftab[i+1] + 2*Ftab[i])*h;
 +      H   = -2*(Vtab[i+1] - Vtab[i]) - (Ftab[i+1] +   Ftab[i])*h;
 +    } else {
 +      /* Fill the last entry with a linear potential,
 +       * this is mainly for rounding issues with angle and dihedral potentials.
 +       */
 +      F   = -Ftab[i]*h;
 +      G   = 0;
 +      H   = 0;
 +    }
 +    nn0 = offset + i*stride;
 +    dest[nn0]   = scalefactor*Vtab[i];
 +    dest[nn0+1] = scalefactor*F;
 +    dest[nn0+2] = scalefactor*G;
 +    dest[nn0+3] = scalefactor*H;
 +  }
 +}
 +
 +static void init_table(FILE *fp,int n,int nx0,
 +                     double tabscale,t_tabledata *td,gmx_bool bAlloc)
 +{
 +  int i;
 +  
 +  td->nx  = n;
 +  td->nx0 = nx0;
 +  td->tabscale = tabscale;
 +  if (bAlloc) {
 +    snew(td->x,td->nx);
 +    snew(td->v,td->nx);
 +    snew(td->f,td->nx);
 +  }
 +  for(i=0; (i<td->nx); i++)
 +    td->x[i] = i/tabscale;
 +}
 +
 +static void spline_forces(int nx,double h,double v[],gmx_bool bS3,gmx_bool bE3,
 +                        double f[])
 +{
 +  int    start,end,i;
 +  double v3,b_s,b_e,b;
 +  double beta,*gamma;
 +
 +  /* Formulas can be found in:
 +   * H.J.C. Berendsen, Simulating the Physical World, Cambridge 2007
 +   */
 +
 +  if (nx < 4 && (bS3 || bE3))
 +    gmx_fatal(FARGS,"Can not generate splines with third derivative boundary conditions with less than 4 (%d) points",nx);
 +  
 +  /* To make life easy we initially set the spacing to 1
 +   * and correct for this at the end.
 +   */
 +  beta = 2;
 +  if (bS3) {
 +    /* Fit V''' at the start */
 +    v3  = v[3] - 3*v[2] + 3*v[1] - v[0];
 +    if (debug)
 +      fprintf(debug,"The left third derivative is %g\n",v3/(h*h*h));
 +    b_s = 2*(v[1] - v[0]) + v3/6;
 +    start = 0;
 +    
 +    if (FALSE) {
 +      /* Fit V'' at the start */
 +      real v2;
 +      
 +      v2  = -v[3] + 4*v[2] - 5*v[1] + 2*v[0];
 +      /* v2  = v[2] - 2*v[1] + v[0]; */
 +      if (debug)
 +      fprintf(debug,"The left second derivative is %g\n",v2/(h*h));
 +      b_s = 3*(v[1] - v[0]) - v2/2;
 +      start = 0;
 +    }
 +  } else {
 +    b_s = 3*(v[2] - v[0]) + f[0]*h;
 +    start = 1;
 +  }
 +  if (bE3) {
 +    /* Fit V''' at the end */
 +    v3  = v[nx-1] - 3*v[nx-2] + 3*v[nx-3] - v[nx-4];
 +    if (debug)
 +      fprintf(debug,"The right third derivative is %g\n",v3/(h*h*h));
 +    b_e = 2*(v[nx-1] - v[nx-2]) + v3/6;
 +    end = nx;
 +  } else {
 +    /* V'=0 at the end */
 +    b_e = 3*(v[nx-1] - v[nx-3]) + f[nx-1]*h;
 +    end = nx - 1;
 +  }
 +
 +  snew(gamma,nx);
 +  beta = (bS3 ? 1 : 4);
 +
 +  /* For V'' fitting */
 +  /* beta = (bS3 ? 2 : 4); */
 +
 +  f[start] = b_s/beta;
 +  for(i=start+1; i<end; i++) {
 +    gamma[i] = 1/beta;
 +    beta = 4 - gamma[i];
 +    b    =  3*(v[i+1] - v[i-1]);
 +    f[i] = (b - f[i-1])/beta;
 +  }
 +  gamma[end-1] = 1/beta;
 +  beta = (bE3 ? 1 : 4) - gamma[end-1];
 +  f[end-1] = (b_e - f[end-2])/beta;
 +
 +  for(i=end-2; i>=start; i--)
 +    f[i] -= gamma[i+1]*f[i+1];
 +  sfree(gamma);
 +
 +  /* Correct for the minus sign and the spacing */
 +  for(i=start; i<end; i++)
 +    f[i] = -f[i]/h;
 +}
 +
 +static void set_forces(FILE *fp,int angle,
 +                     int nx,double h,double v[],double f[],
 +                     int table)
 +{
 +  int start,end;
 +
 +  if (angle == 2)
 +    gmx_fatal(FARGS,
 +            "Force generation for dihedral tables is not (yet) implemented");
 +
 +  start = 0;
 +  while (v[start] == 0)
 +    start++;
 +  
 +  end = nx;
 +  while(v[end-1] == 0)
 +    end--;
 +  if (end > nx - 2)
 +    end = nx;
 +  else
 +    end++;
 +
 +  if (fp)
 +    fprintf(fp,"Generating forces for table %d, boundary conditions: V''' at %g, %s at %g\n",
 +          table+1,start*h,end==nx ? "V'''" : "V'=0",(end-1)*h);
 +  spline_forces(end-start,h,v+start,TRUE,end==nx,f+start);
 +}
 +
 +static void read_tables(FILE *fp,const char *fn,
 +                      int ntab,int angle,t_tabledata td[])
 +{
 +  char *libfn;
 +  char buf[STRLEN];
 +  double **yy=NULL,start,end,dx0,dx1,ssd,vm,vp,f,numf;
 +  int  k,i,nx,nx0=0,ny,nny,ns;
 +  gmx_bool bAllZero,bZeroV,bZeroF;
 +  double tabscale;
 +
 +  nny = 2*ntab+1;  
 +  libfn = gmxlibfn(fn);
 +  nx  = read_xvg(libfn,&yy,&ny);
 +  if (ny != nny)
 +    gmx_fatal(FARGS,"Trying to read file %s, but nr columns = %d, should be %d",
 +              libfn,ny,nny);
 +  if (angle == 0) {
 +    if (yy[0][0] != 0.0)
 +      gmx_fatal(FARGS,
 +              "The first distance in file %s is %f nm instead of %f nm",
 +              libfn,yy[0][0],0.0);
 +  } else {
 +    if (angle == 1)
 +      start = 0.0;
 +    else
 +      start = -180.0;
 +    end = 180.0;
 +    if (yy[0][0] != start || yy[0][nx-1] != end)
 +      gmx_fatal(FARGS,"The angles in file %s should go from %f to %f instead of %f to %f\n",
 +              libfn,start,end,yy[0][0],yy[0][nx-1]);
 +  }
 +
 +  tabscale = (nx-1)/(yy[0][nx-1] - yy[0][0]);
 +  
 +  if (fp) {
 +    fprintf(fp,"Read user tables from %s with %d data points.\n",libfn,nx);
 +    if (angle == 0)
 +      fprintf(fp,"Tabscale = %g points/nm\n",tabscale);
 +  }
 +
 +  bAllZero = TRUE;
 +  for(k=0; k<ntab; k++) {
 +    bZeroV = TRUE;
 +    bZeroF = TRUE;
 +    for(i=0; (i < nx); i++) {
 +      if (i >= 2) {
 +      dx0 = yy[0][i-1] - yy[0][i-2];
 +      dx1 = yy[0][i]   - yy[0][i-1];
 +      /* Check for 1% deviation in spacing */
 +      if (fabs(dx1 - dx0) >= 0.005*(fabs(dx0) + fabs(dx1))) {
 +        gmx_fatal(FARGS,"In table file '%s' the x values are not equally spaced: %f %f %f",fn,yy[0][i-2],yy[0][i-1],yy[0][i]);
 +      }
 +      }
 +      if (yy[1+k*2][i] != 0) {
 +      bZeroV = FALSE;
 +      if (bAllZero) {
 +        bAllZero = FALSE;
 +        nx0 = i;
 +      }
 +      if (yy[1+k*2][i] >  0.01*GMX_REAL_MAX ||
 +          yy[1+k*2][i] < -0.01*GMX_REAL_MAX) {
 +        gmx_fatal(FARGS,"Out of range potential value %g in file '%s'",
 +                  yy[1+k*2][i],fn);
 +      }
 +      }
 +      if (yy[1+k*2+1][i] != 0) {
 +      bZeroF = FALSE;
 +      if (bAllZero) {
 +        bAllZero = FALSE;
 +        nx0 = i;
 +      }
 +      if (yy[1+k*2+1][i] >  0.01*GMX_REAL_MAX ||
 +          yy[1+k*2+1][i] < -0.01*GMX_REAL_MAX) {
 +        gmx_fatal(FARGS,"Out of range force value %g in file '%s'",
 +                  yy[1+k*2+1][i],fn);
 +      }
 +      }
 +    }
 +
 +    if (!bZeroV && bZeroF) {
 +      set_forces(fp,angle,nx,1/tabscale,yy[1+k*2],yy[1+k*2+1],k);
 +    } else {
 +      /* Check if the second column is close to minus the numerical
 +       * derivative of the first column.
 +       */
 +      ssd = 0;
 +      ns = 0;
 +      for(i=1; (i < nx-1); i++) {
 +      vm = yy[1+2*k][i-1];
 +      vp = yy[1+2*k][i+1];
 +      f  = yy[1+2*k+1][i];
 +      if (vm != 0 && vp != 0 && f != 0) {
 +        /* Take the centered difference */
 +        numf = -(vp - vm)*0.5*tabscale;
 +        ssd += fabs(2*(f - numf)/(f + numf));
 +        ns++;
 +      }
 +      }
 +      if (ns > 0) {
 +      ssd /= ns;
 +      sprintf(buf,"For the %d non-zero entries for table %d in %s the forces deviate on average %d%% from minus the numerical derivative of the potential\n",ns,k,libfn,(int)(100*ssd+0.5));
 +      if (debug)
 +        fprintf(debug,"%s",buf);
 +      if (ssd > 0.2) {
 +        if (fp)
 +          fprintf(fp,"\nWARNING: %s\n",buf);
 +        fprintf(stderr,"\nWARNING: %s\n",buf);
 +      }
 +      }
 +    }
 +  }
 +  if (bAllZero && fp) {
 +    fprintf(fp,"\nNOTE: All elements in table %s are zero\n\n",libfn);
 +  }
 +
 +  for(k=0; (k<ntab); k++) {
 +    init_table(fp,nx,nx0,tabscale,&(td[k]),TRUE);
 +    for(i=0; (i<nx); i++) {
 +      td[k].x[i] = yy[0][i];
 +      td[k].v[i] = yy[2*k+1][i];
 +      td[k].f[i] = yy[2*k+2][i];
 +    }
 +  }
 +  for(i=0; (i<ny); i++)
 +    sfree(yy[i]);
 +  sfree(yy);
 +  sfree(libfn);
 +}
 +
 +static void done_tabledata(t_tabledata *td)
 +{
 +  int i;
 +  
 +  if (!td)
 +    return;
 +    
 +  sfree(td->x);
 +  sfree(td->v);
 +  sfree(td->f);
 +}
 +
 +static void fill_table(t_tabledata *td,int tp,const t_forcerec *fr)
 +{
 +  /* Fill the table according to the formulas in the manual.
 +   * In principle, we only need the potential and the second
 +   * derivative, but then we would have to do lots of calculations
 +   * in the inner loop. By precalculating some terms (see manual)
 +   * we get better eventual performance, despite a larger table.
 +   *
 +   * Since some of these higher-order terms are very small,
 +   * we always use double precision to calculate them here, in order
 +   * to avoid unnecessary loss of precision.
 +   */
 +#ifdef DEBUG_SWITCH
 +  FILE *fp;
 +#endif
 +  int  i;
 +  double reppow,p;
 +  double r1,rc,r12,r13;
 +  double r,r2,r6,rc6;
 +  double expr,Vtab,Ftab;
 +  /* Parameters for David's function */
 +  double A=0,B=0,C=0,A_3=0,B_4=0;
 +  /* Parameters for the switching function */
 +  double ksw,swi,swi1;
 +  /* Temporary parameters */
 +  gmx_bool bSwitch,bShift;
 +  double ewc=fr->ewaldcoeff;
 +   
 +  bSwitch = ((tp == etabLJ6Switch) || (tp == etabLJ12Switch) || 
 +           (tp == etabCOULSwitch) ||
 +           (tp == etabEwaldSwitch) || (tp == etabEwaldUserSwitch));
 +  bShift  = ((tp == etabLJ6Shift) || (tp == etabLJ12Shift) || 
 +           (tp == etabShift));
 +
 +  reppow = fr->reppow;
 +
 +  if (tprops[tp].bCoulomb) {
 +    r1 = fr->rcoulomb_switch;
 +    rc = fr->rcoulomb;
 +  } 
 +  else {
 +    r1 = fr->rvdw_switch;
 +    rc = fr->rvdw;
 +  }
 +  if (bSwitch)
 +    ksw  = 1.0/(pow5(rc-r1));
 +  else
 +    ksw  = 0.0;
 +  if (bShift) {
 +    if (tp == etabShift)
 +      p = 1;
 +    else if (tp == etabLJ6Shift) 
 +      p = 6; 
 +    else 
 +      p = reppow;
 +    
 +    A = p * ((p+1)*r1-(p+4)*rc)/(pow(rc,p+2)*pow2(rc-r1));
 +    B = -p * ((p+1)*r1-(p+3)*rc)/(pow(rc,p+2)*pow3(rc-r1));
 +    C = 1.0/pow(rc,p)-A/3.0*pow3(rc-r1)-B/4.0*pow4(rc-r1);
 +    if (tp == etabLJ6Shift) {
 +      A=-A;
 +      B=-B;
 +      C=-C;
 +    }
 +    A_3=A/3.0;
 +    B_4=B/4.0;
 +  }
 +  if (debug) { fprintf(debug,"Setting up tables\n"); fflush(debug); }
 +    
 +#ifdef DEBUG_SWITCH
 +  fp=xvgropen("switch.xvg","switch","r","s");
 +#endif
 +  
 +  for(i=td->nx0; (i<td->nx); i++) {
 +    r     = td->x[i];
 +    r2    = r*r;
 +    r6    = 1.0/(r2*r2*r2);
 +    if (gmx_within_tol(reppow,12.0,10*GMX_DOUBLE_EPS)) {
 +      r12 = r6*r6;
 +    } else {
 +      r12 = pow(r,-reppow);   
 +    }
 +    Vtab  = 0.0;
 +    Ftab  = 0.0;
 +    if (bSwitch) {
 +      /* swi is function, swi1 1st derivative and swi2 2nd derivative */
 +      /* The switch function is 1 for r<r1, 0 for r>rc, and smooth for
 +       * r1<=r<=rc. The 1st and 2nd derivatives are both zero at
 +       * r1 and rc.
 +       * ksw is just the constant 1/(rc-r1)^5, to save some calculations...
 +       */ 
 +      if(r<=r1) {
 +      swi  = 1.0;
 +      swi1 = 0.0;
 +      } else if (r>=rc) {
 +      swi  = 0.0;
 +      swi1 = 0.0;
 +      } else {
 +      swi      = 1 - 10*pow3(r-r1)*ksw*pow2(rc-r1) 
 +        + 15*pow4(r-r1)*ksw*(rc-r1) - 6*pow5(r-r1)*ksw;
 +      swi1     = -30*pow2(r-r1)*ksw*pow2(rc-r1) 
 +        + 60*pow3(r-r1)*ksw*(rc-r1) - 30*pow4(r-r1)*ksw;
 +      }
 +    }
 +    else { /* not really needed, but avoids compiler warnings... */
 +      swi  = 1.0;
 +      swi1 = 0.0;
 +    }
 +#ifdef DEBUG_SWITCH
 +    fprintf(fp,"%10g  %10g  %10g  %10g\n",r,swi,swi1,swi2);
 +#endif
 +
 +    rc6 = rc*rc*rc;
 +    rc6 = 1.0/(rc6*rc6);
 +
 +    switch (tp) {
 +    case etabLJ6:
 +            /* Dispersion */
 +            Vtab = -r6;
 +            Ftab = 6.0*Vtab/r;
 +            break;
 +    case etabLJ6Switch:
 +    case etabLJ6Shift:
 +      /* Dispersion */
 +      if (r < rc) {      
 +          Vtab = -r6;
 +          Ftab = 6.0*Vtab/r;
 +          break;
 +      }
 +      break;
 +    case etabLJ12:
 +            /* Repulsion */
 +            Vtab  = r12;
 +            Ftab  = reppow*Vtab/r;
 +      break;
 +    case etabLJ12Switch:
 +    case etabLJ12Shift:
 +      /* Repulsion */
 +      if (r < rc) {                
 +          Vtab  = r12;
 +          Ftab  = reppow*Vtab/r;
 +      }
 +      break;
 +      case etabLJ6Encad:
 +        if(r < rc) {
 +            Vtab  = -(r6-6.0*(rc-r)*rc6/rc-rc6);
 +            Ftab  = -(6.0*r6/r-6.0*rc6/rc);
 +        } else { /* r>rc */ 
 +            Vtab  = 0;
 +            Ftab  = 0;
 +        } 
 +        break;
 +    case etabLJ12Encad:
 +        if(r < rc) {
 +            Vtab  = -(r6-6.0*(rc-r)*rc6/rc-rc6);
 +            Ftab  = -(6.0*r6/r-6.0*rc6/rc);
 +        } else { /* r>rc */
 +            Vtab  = 0;
 +            Ftab  = 0;
 +        } 
 +        break;        
 +    case etabCOUL:
 +      Vtab  = 1.0/r;
 +      Ftab  = 1.0/r2;
 +      break;
 +    case etabCOULSwitch:
 +    case etabShift:
 +      if (r < rc) { 
 +      Vtab  = 1.0/r;
 +      Ftab  = 1.0/r2;
 +      }
 +      break;
 +    case etabEwald:
 +    case etabEwaldSwitch:
 +      Vtab  = gmx_erfc(ewc*r)/r;
 +      Ftab  = gmx_erfc(ewc*r)/r2+exp(-(ewc*ewc*r2))*ewc*M_2_SQRTPI/r;
 +      break;
 +    case etabEwaldUser:
 +    case etabEwaldUserSwitch:
 +      /* Only calculate minus the reciprocal space contribution */
 +      Vtab  = -gmx_erf(ewc*r)/r;
 +      Ftab  = -gmx_erf(ewc*r)/r2+exp(-(ewc*ewc*r2))*ewc*M_2_SQRTPI/r;
 +      break;
 +    case etabRF:
 +    case etabRF_ZERO:
 +      Vtab  = 1.0/r      +   fr->k_rf*r2 - fr->c_rf;
 +      Ftab  = 1.0/r2     - 2*fr->k_rf*r;
 +      if (tp == etabRF_ZERO && r >= rc) {
 +      Vtab = 0;
 +      Ftab = 0;
 +      }
 +      break;
 +    case etabEXPMIN:
 +      expr  = exp(-r);
 +      Vtab  = expr;
 +      Ftab  = expr;
 +      break;
 +    case etabCOULEncad:
 +        if(r < rc) {
 +            Vtab  = 1.0/r-(rc-r)/(rc*rc)-1.0/rc;
 +            Ftab  = 1.0/r2-1.0/(rc*rc);
 +        } else { /* r>rc */ 
 +            Vtab  = 0;
 +            Ftab  = 0;
 +        } 
 +        break;
 +    default:
 +      gmx_fatal(FARGS,"Table type %d not implemented yet. (%s,%d)",
 +                tp,__FILE__,__LINE__);
 +    }
 +    if (bShift) {
 +      /* Normal coulomb with cut-off correction for potential */
 +      if (r < rc) {
 +      Vtab -= C;
 +      /* If in Shifting range add something to it */
 +      if (r > r1) {
 +        r12 = (r-r1)*(r-r1);
 +        r13 = (r-r1)*r12;
 +        Vtab  += - A_3*r13 - B_4*r12*r12;
 +        Ftab  +=   A*r12 + B*r13;
 +      }
 +      }
 +    }
 +
 +    if (ETAB_USER(tp)) {
 +      Vtab += td->v[i];
 +      Ftab += td->f[i];
 +    }
 +
 +    if ((r > r1) && bSwitch) {
 +      Ftab = Ftab*swi - Vtab*swi1;
 +      Vtab = Vtab*swi;
 +    }
 +
 +    /* Convert to single precision when we store to mem */
 +    td->v[i]  = Vtab;
 +    td->f[i]  = Ftab;
 +  }
 +
 +  /* Continue the table linearly from nx0 to 0.
 +   * These values are only required for energy minimization with overlap or TPI.
 +   */
 +  for(i=td->nx0-1; i>=0; i--) {
 +    td->v[i] = td->v[i+1] + td->f[i+1]*(td->x[i+1] - td->x[i]);
 +    td->f[i] = td->f[i+1];
 +  }
 +
 +#ifdef DEBUG_SWITCH
 +  gmx_fio_fclose(fp);
 +#endif
 +}
 +
 +static void set_table_type(int tabsel[],const t_forcerec *fr,gmx_bool b14only)
 +{
 +  int eltype,vdwtype;
 +
 +  /* Set the different table indices.
 +   * Coulomb first.
 +   */
 +
 +
 +  if (b14only) {
 +    switch (fr->eeltype) {
 +    case eelRF_NEC:
 +      eltype = eelRF;
 +      break;
 +    case eelUSER:
 +    case eelPMEUSER:
 +    case eelPMEUSERSWITCH:
 +      eltype = eelUSER;
 +      break;
 +    default:
 +      eltype = eelCUT;
 +    }
 +  } else {
 +    eltype = fr->eeltype;
 +  }
 +  
 +  switch (eltype) {
 +  case eelCUT:
 +    tabsel[etiCOUL] = etabCOUL;
 +    break;
 +  case eelPOISSON:
 +    tabsel[etiCOUL] = etabShift;
 +    break;
 +  case eelSHIFT:
 +    if (fr->rcoulomb > fr->rcoulomb_switch)
 +      tabsel[etiCOUL] = etabShift;
 +    else
 +      tabsel[etiCOUL] = etabCOUL;
 +    break;
 +  case eelEWALD:
 +  case eelPME:
 +  case eelP3M_AD:
 +    tabsel[etiCOUL] = etabEwald;
 +    break;
 +  case eelPMESWITCH:
 +    tabsel[etiCOUL] = etabEwaldSwitch;
 +    break;
 +  case eelPMEUSER:
 +    tabsel[etiCOUL] = etabEwaldUser;
 +    break;
 +  case eelPMEUSERSWITCH:
 +    tabsel[etiCOUL] = etabEwaldUserSwitch;
 +    break;
 +  case eelRF:
 +  case eelGRF:
 +  case eelRF_NEC:
 +    tabsel[etiCOUL] = etabRF;
 +    break;
 +  case eelRF_ZERO:
 +    tabsel[etiCOUL] = etabRF_ZERO;
 +    break;
 +  case eelSWITCH:
 +    tabsel[etiCOUL] = etabCOULSwitch;
 +    break;
 +  case eelUSER:
 +    tabsel[etiCOUL] = etabUSER;
 +    break;
 +  case eelENCADSHIFT:
 +    tabsel[etiCOUL] = etabCOULEncad;
 +    break;      
 +  default:
 +    gmx_fatal(FARGS,"Invalid eeltype %d",eltype);
 +  }
 +  
 +  /* Van der Waals time */
 +  if (fr->bBHAM && !b14only) {
 +    tabsel[etiLJ6]  = etabLJ6;
 +    tabsel[etiLJ12] = etabEXPMIN;
 +  } else {
 +    if (b14only && fr->vdwtype != evdwUSER)
 +      vdwtype = evdwCUT;
 +    else
 +      vdwtype = fr->vdwtype;
 +
 +    switch (vdwtype) {
 +    case evdwSWITCH:
 +      tabsel[etiLJ6]  = etabLJ6Switch;
 +      tabsel[etiLJ12] = etabLJ12Switch;
 +      break;
 +    case evdwSHIFT:
 +      tabsel[etiLJ6]  = etabLJ6Shift;
 +      tabsel[etiLJ12] = etabLJ12Shift;
 +      break;
 +    case evdwUSER:
 +      tabsel[etiLJ6]  = etabUSER;
 +      tabsel[etiLJ12] = etabUSER;
 +      break;
 +    case evdwCUT:
 +      tabsel[etiLJ6]  = etabLJ6;
 +      tabsel[etiLJ12] = etabLJ12;
 +      break;
 +    case evdwENCADSHIFT:
 +      tabsel[etiLJ6]  = etabLJ6Encad;
 +      tabsel[etiLJ12] = etabLJ12Encad;
 +      break;
 +    default:
 +      gmx_fatal(FARGS,"Invalid vdwtype %d in %s line %d",vdwtype,
 +                __FILE__,__LINE__);
 +    } 
 +  }
 +}
 +
 +t_forcetable make_tables(FILE *out,const output_env_t oenv,
 +                         const t_forcerec *fr,
 +                       gmx_bool bVerbose,const char *fn,
 +                       real rtab,int flags)
 +{
 +  const char *fns[3] = { "ctab.xvg", "dtab.xvg", "rtab.xvg" };
 +  const char *fns14[3] = { "ctab14.xvg", "dtab14.xvg", "rtab14.xvg" };
 +  FILE        *fp;
 +  t_tabledata *td;
 +  gmx_bool        b14only,bReadTab,bGenTab;
 +  real        x0,y0,yp;
 +  int         i,j,k,nx,nx0,tabsel[etiNR];
 +  real        scalefactor;
 +
 +  t_forcetable table;
 +
 +  b14only = (flags & GMX_MAKETABLES_14ONLY);
 +
 +  if (flags & GMX_MAKETABLES_FORCEUSER) {
 +    tabsel[etiCOUL] = etabUSER;
 +    tabsel[etiLJ6]  = etabUSER;
 +    tabsel[etiLJ12] = etabUSER;
 +  } else {
 +    set_table_type(tabsel,fr,b14only);
 +  }
 +  snew(td,etiNR);
 +  table.r         = rtab;
 +  table.scale     = 0;
 +  table.n         = 0;
 +  table.scale_exp = 0;
 +  nx0             = 10;
 +  nx              = 0;
 +  
 +  table.interaction   = GMX_TABLE_INTERACTION_ELEC_VDWREP_VDWDISP;
 +  table.format        = GMX_TABLE_FORMAT_CUBICSPLINE_YFGH;
 +  table.formatsize    = 4;
 +  table.ninteractions = 3;
 +  table.stride        = table.formatsize*table.ninteractions;
 +
 +  /* Check whether we have to read or generate */
 +  bReadTab = FALSE;
 +  bGenTab  = FALSE;
 +  for(i=0; (i<etiNR); i++) {
 +    if (ETAB_USER(tabsel[i]))
 +      bReadTab = TRUE;
 +    if (tabsel[i] != etabUSER)
 +      bGenTab  = TRUE;
 +  }
 +  if (bReadTab) {
 +    read_tables(out,fn,etiNR,0,td);
 +    if (rtab == 0 || (flags & GMX_MAKETABLES_14ONLY)) {
 +      rtab      = td[0].x[td[0].nx-1];
 +      table.n   = td[0].nx;
 +      nx        = table.n;
 +    } else {
 +      if (td[0].x[td[0].nx-1] < rtab) 
 +      gmx_fatal(FARGS,"Tables in file %s not long enough for cut-off:\n"
 +                "\tshould be at least %f nm\n",fn,rtab);
 +      nx        = table.n = (int)(rtab*td[0].tabscale + 0.5);
 +    }
 +    table.scale = td[0].tabscale;
 +    nx0         = td[0].nx0;
 +  }
 +  if (bGenTab) {
 +    if (!bReadTab) {
 +#ifdef GMX_DOUBLE
 +      table.scale = 2000.0;
 +#else
 +      table.scale = 500.0;
 +#endif
 +      nx = table.n = rtab*table.scale;
 +    }
 +  }
 +  if (fr->bBHAM) {
 +    if(fr->bham_b_max!=0)
 +      table.scale_exp = table.scale/fr->bham_b_max;
 +    else
 +      table.scale_exp = table.scale;
 +  }
 +
 +  /* Each table type (e.g. coul,lj6,lj12) requires four 
 +   * numbers per nx+1 data points. For performance reasons we want
 +   * the table data to be aligned to 16-byte.
 +   */
-       snew_aligned(table.data,4*nx,16);
++  snew_aligned(table.data, 12*(nx+1)*sizeof(real),32);
 +
 +  for(k=0; (k<etiNR); k++) {
 +    if (tabsel[k] != etabUSER) {
 +      init_table(out,nx,nx0,
 +               (tabsel[k] == etabEXPMIN) ? table.scale_exp : table.scale,
 +               &(td[k]),!bReadTab);
 +      fill_table(&(td[k]),tabsel[k],fr);
 +      if (out) 
 +      fprintf(out,"%s table with %d data points for %s%s.\n"
 +              "Tabscale = %g points/nm\n",
 +              ETAB_USER(tabsel[k]) ? "Modified" : "Generated",
 +              td[k].nx,b14only?"1-4 ":"",tprops[tabsel[k]].name,
 +              td[k].tabscale);
 +    }
 +
 +    /* Set scalefactor for c6/c12 tables. This is because we save flops in the non-table kernels
 +     * by including the derivative constants (6.0 or 12.0) in the parameters, since
 +     * we no longer calculate force in most steps. This means the c6/c12 parameters
 +     * have been scaled up, so we need to scale down the table interactions too.
 +     * It comes here since we need to scale user tables too.
 +     */
 +      if(k==etiLJ6)
 +      {
 +          scalefactor = 1.0/6.0;
 +      }
 +      else if(k==etiLJ12 && tabsel[k]!=etabEXPMIN)
 +      {
 +          scalefactor = 1.0/12.0;
 +      }
 +      else
 +      {
 +          scalefactor = 1.0;
 +      }
 +
 +    copy2table(table.n,k*4,12,td[k].x,td[k].v,td[k].f,scalefactor,table.data);
 +    
 +    if (bDebugMode() && bVerbose) {
 +      if (b14only)
 +      fp=xvgropen(fns14[k],fns14[k],"r","V",oenv);
 +      else
 +      fp=xvgropen(fns[k],fns[k],"r","V",oenv);
 +      /* plot the output 5 times denser than the table data */
 +      for(i=5*((nx0+1)/2); i<5*table.n; i++) {
 +      x0 = i*table.r/(5*(table.n-1));
 +      evaluate_table(table.data,4*k,12,table.scale,x0,&y0,&yp);
 +      fprintf(fp,"%15.10e  %15.10e  %15.10e\n",x0,y0,yp);
 +      }
 +      gmx_fio_fclose(fp);
 +    }
 +    done_tabledata(&(td[k]));
 +  }
 +  sfree(td);
 +
 +  return table;
 +}
 +
 +t_forcetable make_gb_table(FILE *out,const output_env_t oenv,
 +                           const t_forcerec *fr,
 +                           const char *fn,
 +                           real rtab)
 +{
 +      const char *fns[3] = { "gbctab.xvg", "gbdtab.xvg", "gbrtab.xvg" };
 +      const char *fns14[3] = { "gbctab14.xvg", "gbdtab14.xvg", "gbrtab14.xvg" };
 +      FILE        *fp;
 +      t_tabledata *td;
 +      gmx_bool        bReadTab,bGenTab;
 +      real        x0,y0,yp;
 +      int         i,j,k,nx,nx0,tabsel[etiNR];
 +      double      r,r2,Vtab,Ftab,expterm;
 +      
 +      t_forcetable table;
 +      
 +      double abs_error_r, abs_error_r2;
 +      double rel_error_r, rel_error_r2;
 +      double rel_error_r_old=0, rel_error_r2_old=0;
 +      double x0_r_error, x0_r2_error;
 +      
 +      
 +      /* Only set a Coulomb table for GB */
 +      /* 
 +       tabsel[0]=etabGB;
 +       tabsel[1]=-1;
 +       tabsel[2]=-1;
 +      */
 +      
 +      /* Set the table dimensions for GB, not really necessary to
 +       * use etiNR (since we only have one table, but ...) 
 +       */
 +      snew(td,1);
 +    table.interaction   = GMX_TABLE_INTERACTION_ELEC;
 +    table.format        = GMX_TABLE_FORMAT_CUBICSPLINE_YFGH;
 +      table.r             = fr->gbtabr;
 +      table.scale         = fr->gbtabscale;
 +      table.scale_exp     = 0;
 +      table.n             = table.scale*table.r;
 +    table.formatsize    = 4;
 +    table.ninteractions = 1;
 +    table.stride        = table.formatsize*table.ninteractions;
 +      nx0                 = 0;
 +      nx                  = table.scale*table.r;
 +      
 +      /* Check whether we have to read or generate 
 +       * We will always generate a table, so remove the read code
 +       * (Compare with original make_table function
 +       */
 +      bReadTab = FALSE;
 +      bGenTab  = TRUE;
 +      
 +      /* Each table type (e.g. coul,lj6,lj12) requires four 
 +       * numbers per datapoint. For performance reasons we want
 +       * the table data to be aligned to 16-byte. This is accomplished
 +       * by allocating 16 bytes extra to a temporary pointer, and then
 +       * calculating an aligned pointer. This new pointer must not be
 +       * used in a free() call, but thankfully we're sloppy enough not
 +       * to do this :-)
 +       */
 +      
-     snew_aligned(table.data,4*nx,16);
++      snew_aligned(table.data,4*nx,32);
 +      
 +      init_table(out,nx,nx0,table.scale,&(td[0]),!bReadTab);
 +      
 +      /* Local implementation so we don't have to use the etabGB
 +       * enum above, which will cause problems later when
 +       * making the other tables (right now even though we are using
 +       * GB, the normal Coulomb tables will be created, but this
 +       * will cause a problem since fr->eeltype==etabGB which will not
 +       * be defined in fill_table and set_table_type
 +       */
 +      
 +      for(i=nx0;i<nx;i++)
 +    {
 +              Vtab    = 0.0;
 +              Ftab    = 0.0;
 +              r       = td->x[i];
 +              r2      = r*r;
 +              expterm = exp(-0.25*r2);
 +              
 +              Vtab = 1/sqrt(r2+expterm);
 +              Ftab = (r-0.25*r*expterm)/((r2+expterm)*sqrt(r2+expterm));
 +              
 +              /* Convert to single precision when we store to mem */
 +              td->v[i]  = Vtab;
 +              td->f[i]  = Ftab;
 +              
 +    }
 +      
 +      copy2table(table.n,0,4,td[0].x,td[0].v,td[0].f,1.0,table.data);
 +      
 +      if(bDebugMode())
 +    {
 +              fp=xvgropen(fns[0],fns[0],"r","V",oenv);
 +              /* plot the output 5 times denser than the table data */
 +              /* for(i=5*nx0;i<5*table.n;i++) */
 +              for(i=nx0;i<table.n;i++)
 +              {
 +                      /* x0=i*table.r/(5*table.n); */
 +                      x0=i*table.r/table.n;
 +                      evaluate_table(table.data,0,4,table.scale,x0,&y0,&yp);
 +                      fprintf(fp,"%15.10e  %15.10e  %15.10e\n",x0,y0,yp);
 +                      
 +              }
 +              gmx_fio_fclose(fp);
 +    }
 +      
 +      /*
 +       for(i=100*nx0;i<99.81*table.n;i++)
 +       {
 +       r = i*table.r/(100*table.n);
 +       r2      = r*r;
 +       expterm = exp(-0.25*r2);
 +       
 +       Vtab = 1/sqrt(r2+expterm);
 +       Ftab = (r-0.25*r*expterm)/((r2+expterm)*sqrt(r2+expterm));
 +       
 +       
 +       evaluate_table(table.data,0,4,table.scale,r,&y0,&yp);
 +       printf("gb: i=%d, x0=%g, y0=%15.15f, Vtab=%15.15f, yp=%15.15f, Ftab=%15.15f\n",i,r, y0, Vtab, yp, Ftab);
 +       
 +       abs_error_r=fabs(y0-Vtab);
 +       abs_error_r2=fabs(yp-(-1)*Ftab);
 +       
 +       rel_error_r=abs_error_r/y0;
 +       rel_error_r2=fabs(abs_error_r2/yp);
 +       
 +       
 +       if(rel_error_r>rel_error_r_old)
 +       {
 +       rel_error_r_old=rel_error_r;
 +       x0_r_error=x0;
 +       }
 +       
 +       if(rel_error_r2>rel_error_r2_old)
 +       {
 +       rel_error_r2_old=rel_error_r2;
 +       x0_r2_error=x0;        
 +       }
 +       }
 +       
 +       printf("gb: MAX REL ERROR IN R=%15.15f, MAX REL ERROR IN R2=%15.15f\n",rel_error_r_old, rel_error_r2_old);
 +       printf("gb: XO_R=%g, X0_R2=%g\n",x0_r_error, x0_r2_error);
 +       
 +       exit(1); */
 +      done_tabledata(&(td[0]));
 +      sfree(td);
 +      
 +      return table;
 +      
 +      
 +}
 +
 +t_forcetable make_atf_table(FILE *out,const output_env_t oenv,
 +                          const t_forcerec *fr,
 +                          const char *fn,
 +                            matrix box)
 +{
 +      const char *fns[3] = { "tf_tab.xvg", "atfdtab.xvg", "atfrtab.xvg" };
 +      FILE        *fp;
 +      t_tabledata *td;
 +      real        x0,y0,yp,rtab;
 +      int         i,nx,nx0;
 +        real        rx, ry, rz, box_r;
 +      
 +      t_forcetable table;
 +      
 +      
 +      /* Set the table dimensions for ATF, not really necessary to
 +       * use etiNR (since we only have one table, but ...) 
 +       */
 +      snew(td,1);
 +        
 +        if (fr->adress_type == eAdressSphere){
 +            /* take half box diagonal direction as tab range */
 +               rx = 0.5*box[0][0]+0.5*box[1][0]+0.5*box[2][0];
 +               ry = 0.5*box[0][1]+0.5*box[1][1]+0.5*box[2][1];
 +               rz = 0.5*box[0][2]+0.5*box[1][2]+0.5*box[2][2];
 +               box_r = sqrt(rx*rx+ry*ry+rz*rz);
 +               
 +        }else{
 +            /* xsplit: take half box x direction as tab range */
 +               box_r        = box[0][0]/2;
 +        }
 +        table.r         = box_r;
 +      table.scale     = 0;
 +      table.n         = 0;
 +      table.scale_exp = 0;
 +      nx0             = 10;
 +      nx              = 0;
 +      
 +        read_tables(out,fn,1,0,td);
 +        rtab      = td[0].x[td[0].nx-1];
 +
 +       if (fr->adress_type == eAdressXSplit && (rtab < box[0][0]/2)){
 +           gmx_fatal(FARGS,"AdResS full box therm force table in file %s extends to %f:\n"
 +                        "\tshould extend to at least half the length of the box in x-direction"
 +                        "%f\n",fn,rtab, box[0][0]/2);
 +       }
 +       if (rtab < box_r){
 +               gmx_fatal(FARGS,"AdResS full box therm force table in file %s extends to %f:\n"
 +                "\tshould extend to at least for spherical adress"
 +                "%f (=distance from center to furthermost point in box \n",fn,rtab, box_r);
 +       }
 +
 +
 +        table.n   = td[0].nx;
 +        nx        = table.n;
 +        table.scale = td[0].tabscale;
 +        nx0         = td[0].nx0;
 +
 +      /* Each table type (e.g. coul,lj6,lj12) requires four 
 +       * numbers per datapoint. For performance reasons we want
 +       * the table data to be aligned to 16-byte. This is accomplished
 +       * by allocating 16 bytes extra to a temporary pointer, and then
 +       * calculating an aligned pointer. This new pointer must not be
 +       * used in a free() call, but thankfully we're sloppy enough not
 +       * to do this :-)
 +       */
 +      
++    snew_aligned(table.data,4*nx,32);
 +
 +      copy2table(table.n,0,4,td[0].x,td[0].v,td[0].f,1.0,table.data);
 +      
 +      if(bDebugMode())
 +        {
 +          fp=xvgropen(fns[0],fns[0],"r","V",oenv);
 +          /* plot the output 5 times denser than the table data */
 +          /* for(i=5*nx0;i<5*table.n;i++) */
 +         
 +            for(i=5*((nx0+1)/2); i<5*table.n; i++)
 +            {
 +              /* x0=i*table.r/(5*table.n); */
 +              x0 = i*table.r/(5*(table.n-1));
 +              evaluate_table(table.data,0,4,table.scale,x0,&y0,&yp);
 +              fprintf(fp,"%15.10e  %15.10e  %15.10e\n",x0,y0,yp);
 +              
 +            }
 +          ffclose(fp);
 +        }
 +
 +      done_tabledata(&(td[0]));
 +      sfree(td);
 +
 +    table.interaction   = GMX_TABLE_INTERACTION_ELEC_VDWREP_VDWDISP;
 +    table.format        = GMX_TABLE_FORMAT_CUBICSPLINE_YFGH;
 +    table.formatsize    = 4;
 +    table.ninteractions = 3;
 +    table.stride        = table.formatsize*table.ninteractions;
 +
 +      
 +      return table;
 +}
 +
 +bondedtable_t make_bonded_table(FILE *fplog,char *fn,int angle)
 +{
 +  t_tabledata td;
 +  double start;
 +  int    i;
 +  bondedtable_t tab;
 +  
 +  if (angle < 2)
 +    start = 0;
 +  else
 +    start = -180.0;
 +  read_tables(fplog,fn,1,angle,&td);
 +  if (angle > 0) {
 +    /* Convert the table from degrees to radians */
 +    for(i=0; i<td.nx; i++) {
 +      td.x[i] *= DEG2RAD;
 +      td.f[i] *= RAD2DEG;
 +    }
 +    td.tabscale *= RAD2DEG;
 +  }
 +  tab.n = td.nx;
 +  tab.scale = td.tabscale;
 +  snew(tab.data,tab.n*4);
 +  copy2table(tab.n,0,4,td.x,td.v,td.f,1.0,tab.data);
 +  done_tabledata(&td);
 +
 +  return tab;
 +}
 +
 +
index ebf96cd12a6178fb899144b7691c25be0cf6e433,0000000000000000000000000000000000000000..2c4e21f0b3a32af23cc793d719c7bce2a3d23556
mode 100644,000000..100644
--- /dev/null
@@@ -1,121 -1,0 +1,120 @@@
-     g_dih
 +# TODO: It would be nicer to have the list generated from the binary,
 +# but this has some complications.  As the same list is also needed for
 +# man page generation, this can wait for now.
 +set(SYMLINK_NAMES
 +    do_dssp
 +    editconf
 +    eneconv
 +    genbox
 +    genconf
 +    genion
 +    genrestr
 +    make_edi
 +    make_ndx
 +    mk_angndx
 +    trjcat
 +    trjconv
 +    trjorder
 +    xpm2ps
 +    g_anadock
 +    g_anaeig
 +    g_analyze
 +    g_angle
 +    g_bar
 +    g_bond
 +    g_bundle
 +    g_chi
 +    g_cluster
 +    g_clustsize
 +    g_confrms
 +    g_covar
 +    g_current
 +    g_density
 +    g_densmap
 +    g_densorder
 +    g_dielectric
 +    g_dipoles
 +    g_disre
 +    g_dist
 +    g_dos
 +    g_dyecoupl
 +    g_dyndom
 +    g_enemat
 +    g_energy
 +    g_filter
 +    g_gyrate
 +    g_h2order
 +    g_hbond
 +    g_helix
 +    g_helixorient
 +    g_hydorder
 +    g_kinetics
 +    g_lie
 +    g_mdmat
 +    g_mindist
 +    g_morph
 +    g_msd
 +    g_nmeig
 +    g_nmens
 +    g_nmtraj
 +    g_options
 +    g_order
 +    g_polystat
 +    g_potential
 +    g_principal
 +    g_rama
 +    g_rdf
 +    g_rms
 +    g_rmsdist
 +    g_rmsf
 +    g_rotacf
 +    g_rotmat
 +    g_saltbr
 +    g_sans
 +    g_sas
 +    g_saxs
 +    g_select
 +    g_sgangle
 +    g_sham
 +    g_sigeps
 +    g_sorient
 +    g_spatial
 +    g_spol
 +    g_tcaf
 +    g_traj
 +    g_vanhove
 +    g_velacc
 +    g_wham
 +    g_wheel
 +    )
 +
 +if (NOT DEFINED BINARY_DIRECTORY)
 +    set(BINARY_DIRECTORY "$ENV{DESTDIR}${CMAKE_INSTALL_PREFIX}/@BIN_INSTALL_DIR@")
 +endif ()
 +set(BINARY_SUFFIX "@GMX_BINARY_SUFFIX@@CMAKE_EXECUTABLE_SUFFIX@")
 +set(GMX_NATIVE_WINDOWS "@GMX_NATIVE_WINDOWS@")
 +
 +foreach (linkname ${SYMLINK_NAMES})
 +    set(linkpath ${BINARY_DIRECTORY}/${linkname}${BINARY_SUFFIX})
 +    # Based on documentation, CMake only supports symbolic links on Unix,
 +    # although NTFS also has those (since Windows Vista; they require admin
 +    # permissions to create).
 +    if (GMX_NATIVE_WINDOWS)
 +        if (NOT QUIETLY)
 +            message(STATUS "Installing: Creating alias binary ${linkpath}")
 +        endif ()
 +        execute_process(
 +            COMMAND ${CMAKE_COMMAND} -E copy_if_different
 +                ${BINARY_DIRECTORY}/gmx${BINARY_SUFFIX} ${linkpath})
 +    else()
 +        if (NOT QUIETLY)
 +            message(STATUS "Installing: Creating symbolic link ${linkpath}")
 +        endif ()
 +        if (EXISTS ${linkpath})
 +            FILE(REMOVE ${linkpath})
 +        endif ()
 +        execute_process(
 +            COMMAND ${CMAKE_COMMAND} -E create_symlink
 +                gmx${BINARY_SUFFIX} ${linkpath})
 +    endif()
 +endforeach ()
index 292c56422027c90ac88f54e7086040d7b315a295,0000000000000000000000000000000000000000..a25ebed8d462871cd342824fe9c5fbb84bc3240c
mode 100644,000000..100644
--- /dev/null
@@@ -1,320 -1,0 +1,318 @@@
-     LegacyCmdLineWrapper::registerModule(manager, &gmx_dih, "dih",
-             "Analyze dihedral transitions");
 +/*
 + *
 + *                This source code is part of
 + *
 + *                 G   R   O   M   A   C   S
 + *
 + *          GROningen MAchine for Chemical Simulations
 + *
 + * Written by David van der Spoel, Erik Lindahl, Berk Hess, and others.
 + * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 + * Copyright (c) 2001-2009, The GROMACS development team,
 + * check out http://www.gromacs.org for more information.
 +
 + * This program is free software; you can redistribute it and/or
 + * modify it under the terms of the GNU General Public License
 + * as published by the Free Software Foundation; either version 2
 + * of the License, or (at your option) any later version.
 + *
 + * If you want to redistribute modifications, please consider that
 + * scientific software is very special. Version control is crucial -
 + * bugs must be traceable. We will be happy to consider code for
 + * inclusion in the official distribution, but derived work must not
 + * be called official GROMACS. Details are found in the README & COPYING
 + * files - if they are missing, get the official version at www.gromacs.org.
 + *
 + * To help us fund GROMACS development, we humbly ask that you cite
 + * the papers on the package - you can find them in the top README file.
 + *
 + * For more info, check our website at http://www.gromacs.org
 + */
 +/*! \internal \brief
 + * Implements command-line modules for pre-5.0 binaries.
 + *
 + * \author Teemu Murtola <teemu.murtola@cbr.su.se>
 + */
 +#include "legacymodules.h"
 +
 +#include "gromacs/commandline/cmdlinemodule.h"
 +#include "gromacs/commandline/cmdlinemodulemanager.h"
 +#include "gromacs/onlinehelp/helpwritercontext.h"
 +#include "gromacs/utility/exceptions.h"
 +
 +#include "gromacs/legacyheaders/gmx_ana.h"
 +
 +namespace
 +{
 +
 +/*! \internal \brief
 + * Command-line module for wrapping pre-5.0 binaries.
 + *
 + * Implements a gmx::CommandLineModuleInterface, given a function with
 + * C/C++ main signature.
 + */
 +class LegacyCmdLineWrapper : public gmx::CommandLineModuleInterface
 +{
 +    public:
 +        //! Function pointer type for the main function of the module.
 +        typedef int (*MainFunction)(int argc, char *argv[]);
 +
 +        /*! \brief
 +         * Convenience function for creating and registering a module.
 +         *
 +         * \param[in] manager  Module manager to which to register the module.
 +         * \param[in] main     Main function to wrap.
 +         * \param[in] name     Name for the new module.
 +         * \param[in] shortDescription One-line description for the new module.
 +         */
 +        static void registerModule(gmx::CommandLineModuleManager *manager,
 +                                   MainFunction main, const char *name,
 +                                   const char *shortDescription)
 +        {
 +            gmx::CommandLineModulePointer module(
 +                    new LegacyCmdLineWrapper(main, name, shortDescription));
 +            manager->addModule(gmx::move(module));
 +        }
 +
 +        /*! \brief
 +         * Creates a wrapper module for the given main function.
 +         *
 +         * \see registerModule()
 +         */
 +        LegacyCmdLineWrapper(MainFunction main, const char *name,
 +                             const char *shortDescription)
 +            : main_(main), name_(name), shortDescription_(shortDescription)
 +        {
 +        }
 +
 +        virtual const char *name() const
 +        {
 +            return name_;
 +        }
 +        virtual const char *shortDescription() const
 +        {
 +            return shortDescription_;
 +        }
 +
 +        virtual int run(int argc, char *argv[]);
 +        virtual void writeHelp(const gmx::HelpWriterContext &context) const;
 +
 +    private:
 +        MainFunction            main_;
 +        const char             *name_;
 +        const char             *shortDescription_;
 +
 +};
 +
 +int LegacyCmdLineWrapper::run(int argc, char *argv[])
 +{
 +    return main_(argc, argv);
 +}
 +
 +void LegacyCmdLineWrapper::writeHelp(const gmx::HelpWriterContext &context) const
 +{
 +    if (context.outputFormat() != gmx::eHelpOutputFormat_Console)
 +    {
 +        GMX_THROW(gmx::NotImplementedError(
 +                    "Command-line help is not implemented for this output format"));
 +    }
 +    char *argv[2];
 +    // TODO: The constness should not be cast away.
 +    argv[0] = const_cast<char *>(name_);
 +    argv[1] = const_cast<char *>("-h");
 +    main_(2, argv);
 +}
 +
 +} // namespace
 +
 +void registerLegacyModules(gmx::CommandLineModuleManager *manager)
 +{
 +    LegacyCmdLineWrapper::registerModule(manager, &gmx_do_dssp, "do_dssp",
 +            "Assign secondary structure and calculate solvent accessible surface area");
 +    LegacyCmdLineWrapper::registerModule(manager, &gmx_editconf, "editconf",
 +            "Convert and manipulates structure files");
 +    LegacyCmdLineWrapper::registerModule(manager, &gmx_eneconv, "eneconv",
 +            "Convert energy files");
 +    LegacyCmdLineWrapper::registerModule(manager, &gmx_genbox, "genbox",
 +            "Solvate a system");
 +    LegacyCmdLineWrapper::registerModule(manager, &gmx_genconf, "genconf",
 +            "Multiply a conformation in 'random' orientations");
 +    LegacyCmdLineWrapper::registerModule(manager, &gmx_genion, "genion",
 +            "Generate monoatomic ions on energetically favorable positions");
 +    LegacyCmdLineWrapper::registerModule(manager, &gmx_genpr, "genrestr",
 +            "Generate position restraints or distance restraints for index groups");
 +    LegacyCmdLineWrapper::registerModule(manager, &gmx_make_edi, "make_edi",
 +            "Generate input files for essential dynamics sampling");
 +    LegacyCmdLineWrapper::registerModule(manager, &gmx_make_ndx, "make_ndx",
 +            "Make index files");
 +    LegacyCmdLineWrapper::registerModule(manager, &gmx_mk_angndx, "mk_angndx",
 +            "Generate index files for 'gmx angle'");
 +    LegacyCmdLineWrapper::registerModule(manager, &gmx_trjcat, "trjcat",
 +            "Concatenate trajectory files");
 +    LegacyCmdLineWrapper::registerModule(manager, &gmx_trjconv, "trjconv",
 +            "Convert and manipulates trajectory files");
 +    LegacyCmdLineWrapper::registerModule(manager, &gmx_trjorder, "trjorder",
 +            "Order molecules according to their distance to a group");
 +    LegacyCmdLineWrapper::registerModule(manager, &gmx_xpm2ps, "xpm2ps",
 +            "Convert XPM (XPixelMap) matrices to postscript or XPM");
 +
 +    // TODO: Include remaining binaries from src/tools/.
 +    // These are commented out below, and have some issues to consider how to
 +    // best handle them.
 +    LegacyCmdLineWrapper::registerModule(manager, &gmx_anadock, "anadock",
 +            "Cluster structures from Autodock runs");
 +    LegacyCmdLineWrapper::registerModule(manager, &gmx_anaeig, "anaeig",
 +            "Analyze eigenvectors/normal modes");
 +    LegacyCmdLineWrapper::registerModule(manager, &gmx_analyze, "analyze",
 +            "Analyze data sets");
 +    LegacyCmdLineWrapper::registerModule(manager, &gmx_g_angle, "angle",
 +            "Calculate distributions and correlations for angles and dihedrals");
 +    LegacyCmdLineWrapper::registerModule(manager, &gmx_bar, "bar",
 +            "Calculate free energy difference estimates through Bennett's acceptance ratio");
 +    LegacyCmdLineWrapper::registerModule(manager, &gmx_bond, "bond",
 +            "Calculate distances between atoms and bond length distributions");
 +    LegacyCmdLineWrapper::registerModule(manager, &gmx_bundle, "bundle",
 +            "Analyze bundles of axes, e.g., helices");
 +    LegacyCmdLineWrapper::registerModule(manager, &gmx_chi, "chi",
 +            "Calculate everything you want to know about chi and other dihedrals");
 +    LegacyCmdLineWrapper::registerModule(manager, &gmx_cluster, "cluster",
 +            "Cluster structures");
 +    LegacyCmdLineWrapper::registerModule(manager, &gmx_clustsize, "clustsize",
 +            "Calculate size distributions of atomic clusters");
 +    LegacyCmdLineWrapper::registerModule(manager, &gmx_confrms, "confrms",
 +            "Fit two structures and calculates the RMSD");
 +    LegacyCmdLineWrapper::registerModule(manager, &gmx_covar, "covar",
 +            "Calculate and diagonalize the covariance matrix");
 +    LegacyCmdLineWrapper::registerModule(manager, &gmx_current, "current",
 +            "Calculate dielectric constants and charge autocorrelation function");
 +    LegacyCmdLineWrapper::registerModule(manager, &gmx_density, "density",
 +            "Calculate the density of the system");
 +    LegacyCmdLineWrapper::registerModule(manager, &gmx_densmap, "densmap",
 +            "Calculate 2D planar or axial-radial density maps");
 +    LegacyCmdLineWrapper::registerModule(manager, &gmx_densorder, "densorder",
 +            "Calculate surface fluctuations");
 +    LegacyCmdLineWrapper::registerModule(manager, &gmx_dielectric, "dielectric",
 +            "Calculate frequency dependent dielectric constants");
 +    LegacyCmdLineWrapper::registerModule(manager, &gmx_dipoles, "dipoles",
 +            "Compute the total dipole plus fluctuations");
 +    LegacyCmdLineWrapper::registerModule(manager, &gmx_disre, "disre",
 +            "Analyze distance restraints");
 +    LegacyCmdLineWrapper::registerModule(manager, &gmx_dist, "dist",
 +            "Calculate distances between centers of mass of two groups");
 +    LegacyCmdLineWrapper::registerModule(manager, &gmx_dos, "dos",
 +            "Analyze density of states and properties based on that");
 +    LegacyCmdLineWrapper::registerModule(manager, &gmx_dyecoupl, "dyecoupl",
 +            "Extract dye dynamics from trajectories");
 +    LegacyCmdLineWrapper::registerModule(manager, &gmx_dyndom, "dyndom",
 +            "Interpolate and extrapolate structure rotations");
 +    LegacyCmdLineWrapper::registerModule(manager, &gmx_enemat, "enemat",
 +            "Extract an energy matrix from an energy file");
 +    LegacyCmdLineWrapper::registerModule(manager, &gmx_energy, "energy",
 +            "Writes energies to xvg files and display averages");
 +    LegacyCmdLineWrapper::registerModule(manager, &gmx_filter, "filter",
 +            "Frequency filter trajectories, useful for making smooth movies");
 +    LegacyCmdLineWrapper::registerModule(manager, &gmx_gyrate, "gyrate",
 +            "Calculate the radius of gyration");
 +    LegacyCmdLineWrapper::registerModule(manager, &gmx_h2order, "h2order",
 +            "Compute the orientation of water molecules");
 +    LegacyCmdLineWrapper::registerModule(manager, &gmx_hbond, "hbond",
 +            "Compute and analyze hydrogen bonds");
 +    LegacyCmdLineWrapper::registerModule(manager, &gmx_helix, "helix",
 +            "Calculate basic properties of alpha helices");
 +    LegacyCmdLineWrapper::registerModule(manager, &gmx_helixorient, "helixorient",
 +            "Calculate local pitch/bending/rotation/orientation inside helices");
 +    LegacyCmdLineWrapper::registerModule(manager, &gmx_hydorder, "hydorder",
 +            "Compute tetrahedrality parameters around a given atom");
 +    LegacyCmdLineWrapper::registerModule(manager, &gmx_kinetics, "kinetics",
 +            "Analyze kinetic constants from properties based on the Eyring model");
 +    LegacyCmdLineWrapper::registerModule(manager, &gmx_lie, "lie",
 +            "Estimate free energy from linear combinations");
 +    LegacyCmdLineWrapper::registerModule(manager, &gmx_mdmat, "mdmat",
 +            "Calculate residue contact maps");
 +    LegacyCmdLineWrapper::registerModule(manager, &gmx_mindist, "mindist",
 +            "Calculate the minimum distance between two groups");
 +    LegacyCmdLineWrapper::registerModule(manager, &gmx_morph, "morph",
 +            "Interpolate linearly between conformations");
 +    LegacyCmdLineWrapper::registerModule(manager, &gmx_msd, "msd",
 +            "Calculates mean square displacements");
 +    LegacyCmdLineWrapper::registerModule(manager, &gmx_nmeig, "nmeig",
 +            "Diagonalize the Hessian for normal mode analysis");
 +    LegacyCmdLineWrapper::registerModule(manager, &gmx_nmens, "nmens",
 +            "Generate an ensemble of structures from the normal modes");
 +    LegacyCmdLineWrapper::registerModule(manager, &gmx_nmtraj, "nmtraj",
 +            "Generate a virtual oscillating trajectory from an eigenvector");
 +    LegacyCmdLineWrapper::registerModule(manager, &gmx_options, "options",
 +            NULL);
 +    LegacyCmdLineWrapper::registerModule(manager, &gmx_order, "order",
 +            "Compute the order parameter per atom for carbon tails");
 +    //LegacyCmdLineWrapper::registerModule(manager, &gmx_pme_error, "pme_error",
 +    //        "Estimate the error of using PME with a given input file");
 +    LegacyCmdLineWrapper::registerModule(manager, &gmx_polystat, "polystat",
 +            "Calculate static properties of polymers");
 +    LegacyCmdLineWrapper::registerModule(manager, &gmx_potential, "potential",
 +            "Calculate the electrostatic potential across the box");
 +    LegacyCmdLineWrapper::registerModule(manager, &gmx_principal, "principal",
 +            "Calculate principal axes of inertia for a group of atoms");
 +    LegacyCmdLineWrapper::registerModule(manager, &gmx_rama, "rama",
 +            "Compute Ramachandran plots");
 +    LegacyCmdLineWrapper::registerModule(manager, &gmx_rdf, "rdf",
 +            "Calculate radial distribution functions");
 +    LegacyCmdLineWrapper::registerModule(manager, &gmx_rms, "rms",
 +            "Calculate RMSDs with a reference structure and RMSD matrices");
 +    LegacyCmdLineWrapper::registerModule(manager, &gmx_rmsdist, "rmsdist",
 +            "Calculate atom pair distances averaged with power -2, -3 or -6");
 +    LegacyCmdLineWrapper::registerModule(manager, &gmx_rmsf, "rmsf",
 +            "Calculate atomic fluctuations");
 +    LegacyCmdLineWrapper::registerModule(manager, &gmx_rotacf, "rotacf",
 +            "Calculate the rotational correlation function for molecules");
 +    LegacyCmdLineWrapper::registerModule(manager, &gmx_rotmat, "rotmat",
 +            "Plot the rotation matrix for fitting to a reference structure");
 +    LegacyCmdLineWrapper::registerModule(manager, &gmx_saltbr, "saltbr",
 +            "Compute salt bridges");
 +    LegacyCmdLineWrapper::registerModule(manager, &gmx_sans, "sans",
 +            "Compute the small angle neutron scattering spectra");
 +    LegacyCmdLineWrapper::registerModule(manager, &gmx_sas, "sas",
 +            "Compute solvent accessible surface area");
 +    LegacyCmdLineWrapper::registerModule(manager, &gmx_saxs, "saxs",
 +            "Calculates SAXS structure factors based on Cromer's method");
 +    LegacyCmdLineWrapper::registerModule(manager, &gmx_sgangle, "sgangle",
 +            "Compute the angle and distance between two groups");
 +    LegacyCmdLineWrapper::registerModule(manager, &gmx_sham, "sham",
 +            "Compute free energies or other histograms from histograms");
 +    LegacyCmdLineWrapper::registerModule(manager, &gmx_sigeps, "sigeps",
 +            "Convert c6/12 or c6/cn combinations to and from sigma/epsilon");
 +    LegacyCmdLineWrapper::registerModule(manager, &gmx_sorient, "sorient",
 +            "Analyze solvent orientation around solutes");
 +    LegacyCmdLineWrapper::registerModule(manager, &gmx_spatial, "spatial",
 +            "Calculate the spatial distribution function");
 +    LegacyCmdLineWrapper::registerModule(manager, &gmx_spol, "spol",
 +            "Analyze solvent dipole orientation and polarization around solutes");
 +    LegacyCmdLineWrapper::registerModule(manager, &gmx_tcaf, "tcaf",
 +            "Calculate viscosities of liquids");
 +    LegacyCmdLineWrapper::registerModule(manager, &gmx_traj, "traj",
 +            "Plot x, v, f, box, temperature and rotational energy from trajectories");
 +    //LegacyCmdLineWrapper::registerModule(manager, &gmx_tune_pme, "tune_pme",
 +    //        "Time mdrun as a function of PME nodes to optimize settings");
 +    LegacyCmdLineWrapper::registerModule(manager, &gmx_vanhove, "vanhove",
 +            "Compute Van Hove correlation functions");
 +    LegacyCmdLineWrapper::registerModule(manager, &gmx_velacc, "velacc",
 +            "Calculate velocity autocorrelation functions");
 +    LegacyCmdLineWrapper::registerModule(manager, &gmx_wham, "wham",
 +            "Perform weighted histogram analysis after umbrella sampling");
 +    LegacyCmdLineWrapper::registerModule(manager, &gmx_wheel, "wheel",
 +            "Plot helical wheels");
 +
 +    // TODO: Also include binaries from other directories than src/tools/:
 +    //        "g_protonate|Protonate structures");
 +    //        "g_x2top|Generate a primitive topology from coordinates ");
 +    //        "g_xrama|Show animated Ramachandran plots");
 +    //        "gmxcheck|Check and compare files");
 +    //        "gmxdump|Make binary files human readable");
 +    //        "grompp|Make a run input file");
 +    //        "mdrun|finds a potential energy minimum and calculates the Hessian");
 +    //        "mdrun|performs a simulation, do a normal mode analysis or an energy minimization");
 +    //        "mdrun|with -rerun (re)calculates energies for trajectory frames");
 +    //        "ngmx|Display a trajectory");
 +    //        "pdb2gmx|Convert coordinate files to topology and FF-compliant coordinate files");
 +    //        "tpbconv|Make a run input file for restarting a crashed run");
 +}
index c6fd9cd960ac1aa736b142920b5a72dd243bbd12,0000000000000000000000000000000000000000..4c2bf0350ebb04dc63375793c5c03f428f4310ae
mode 100644,000000..100644
--- /dev/null
@@@ -1,716 -1,0 +1,718 @@@
-       case F_G96BONDS:
 +/*
 + * 
 + *                This source code is part of
 + * 
 + *                 G   R   O   M   A   C   S
 + * 
 + *          GROningen MAchine for Chemical Simulations
 + * 
 + *                        VERSION 3.2.0
 + * Written by David van der Spoel, Erik Lindahl, Berk Hess, and others.
 + * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 + * Copyright (c) 2001-2004, The GROMACS development team,
 + * check out http://www.gromacs.org for more information.
 +
 + * This program is free software; you can redistribute it and/or
 + * modify it under the terms of the GNU General Public License
 + * as published by the Free Software Foundation; either version 2
 + * of the License, or (at your option) any later version.
 + * 
 + * If you want to redistribute modifications, please consider that
 + * scientific software is very special. Version control is crucial -
 + * bugs must be traceable. We will be happy to consider code for
 + * inclusion in the official distribution, but derived work must not
 + * be called official GROMACS. Details are found in the README & COPYING
 + * files - if they are missing, get the official version at www.gromacs.org.
 + * 
 + * To help us fund GROMACS development, we humbly ask that you cite
 + * the papers on the package - you can find them in the top README file.
 + * 
 + * For more info, check our website at http://www.gromacs.org
 + * 
 + * And Hey:
 + * Gallium Rubidium Oxygen Manganese Argon Carbon Silicon
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <stdio.h>
 +#include <string.h>
 +#include <ctype.h>
 +#include "main.h"
 +#include "macros.h"
 +#include <math.h>
 +#include "futil.h"
 +#include "statutil.h"
 +#include "copyrite.h"
 +#include "sysstuff.h"
 +#include "txtdump.h"
 +#include "gmx_fatal.h"
 +#include "gmxfio.h"
 +#include "trnio.h"
 +#include "xtcio.h"
 +#include "tpbcmp.h"
 +#include "atomprop.h"
 +#include "vec.h"
 +#include "pbc.h"
 +#include "physics.h"
 +#include "index.h"
 +#include "smalloc.h"
 +#include "confio.h"
 +#include "enxio.h"
 +#include "tpxio.h"
 +#include "names.h"
 +#include "mtop_util.h"
 +
 +typedef struct {
 +  int bStep;
 +  int bTime;
 +  int bLambda;
 +  int bX;
 +  int bV;
 +  int bF;
 +  int bBox;
 +} t_count;
 +
 +typedef struct {
 +  float bStep;
 +  float bTime;
 +  float bLambda;
 +  float bX;
 +  float bV;
 +  float bF;
 +  float bBox;
 +} t_fr_time;
 +
 +static void tpx2system(FILE *fp,gmx_mtop_t *mtop)
 +{
 +  int i,nmol,nvsite=0;
 +  gmx_mtop_atomloop_block_t aloop;
 +  t_atom *atom;
 +
 +  fprintf(fp,"\\subsection{Simulation system}\n");
 +  aloop = gmx_mtop_atomloop_block_init(mtop);
 +  while(gmx_mtop_atomloop_block_next(aloop,&atom,&nmol)) {
 +    if (atom->ptype == eptVSite)
 +      nvsite += nmol;
 +  }
 +  fprintf(fp,"A system of %d molecules (%d atoms) was simulated.\n",
 +        mtop->mols.nr,mtop->natoms-nvsite);
 +  if (nvsite)
 +    fprintf(fp,"Virtual sites were used in some of the molecules.\n");
 +  fprintf(fp,"\n\n");
 +}
 +
 +static void tpx2params(FILE *fp,t_inputrec *ir)
 +{
 +  fprintf(fp,"\\subsection{Simulation settings}\n");
 +  fprintf(fp,"A total of %g ns were simulated with a time step of %g fs.\n",
 +        ir->nsteps*ir->delta_t*0.001,1000*ir->delta_t);
 +  fprintf(fp,"Neighbor searching was performed every %d steps.\n",ir->nstlist);
 +  fprintf(fp,"The %s algorithm was used for electrostatic interactions.\n",
 +        EELTYPE(ir->coulombtype));
 +  fprintf(fp,"with a cut-off of %g nm.\n",ir->rcoulomb);  
 +  if (ir->coulombtype == eelPME)
 +    fprintf(fp,"A reciprocal grid of %d x %d x %d cells was used with %dth order B-spline interpolation.\n",ir->nkx,ir->nky,ir->nkz,ir->pme_order);
 +  if (ir->rvdw > ir->rlist) 
 +    fprintf(fp,"A twin-range Van der Waals cut-off (%g/%g nm) was used, where the long range forces were updated during neighborsearching.\n",ir->rlist,ir->rvdw);
 +  else
 +    fprintf(fp,"A single cut-off of %g was used for Van der Waals interactions.\n",ir->rlist);
 +  if (ir->etc != 0) {
 +    fprintf(fp,"Temperature coupling was done with the %s algorithm.\n",
 +          etcoupl_names[ir->etc]);
 +  }
 +  if (ir->epc != 0) {
 +    fprintf(fp,"Pressure coupling was done with the %s algorithm.\n",
 +          epcoupl_names[ir->epc]);
 +  }
 +  fprintf(fp,"\n\n");
 +}
 +
 +static void tpx2methods(const char *tpx,const char *tex)
 +{
 +  FILE         *fp;
 +  t_tpxheader sh;
 +  t_inputrec  ir;
 +  t_state     state;
 +  gmx_mtop_t  mtop;
 +
 +  read_tpx_state(tpx,&ir,&state,NULL,&mtop);
 +  fp=gmx_fio_fopen(tex,"w");
 +  fprintf(fp,"\\section{Methods}\n");
 +  tpx2system(fp,&mtop);
 +  tpx2params(fp,&ir);
 +  gmx_fio_fclose(fp);
 +}
 +
 +static void chk_coords(int frame,int natoms,rvec *x,matrix box,real fac,real tol)
 +{
 +  int i,j;
 +  int nNul=0;
 +  real vol = det(box);
 +  
 +  for(i=0; (i<natoms); i++) {
 +    for(j=0; (j<DIM); j++) {
 +      if ((vol > 0) && (fabs(x[i][j]) > fac*box[j][j]))
 +      printf("Warning at frame %d: coordinates for atom %d are large (%g)\n",
 +             frame,i,x[i][j]);
 +    }
 +    if ((fabs(x[i][XX]) < tol) && 
 +        (fabs(x[i][YY]) < tol) && 
 +        (fabs(x[i][ZZ]) < tol))
 +    {
 +        nNul++;
 +    }
 +  }
 +  if (nNul > 0)
 +    printf("Warning at frame %d: there are %d particles with all coordinates zero\n",
 +         frame,nNul);
 +}
 +
 +static void chk_vels(int frame,int natoms,rvec *v)
 +{
 +  int i,j;
 +  
 +  for(i=0; (i<natoms); i++) {
 +    for(j=0; (j<DIM); j++) {
 +      if (fabs(v[i][j]) > 500)
 +      printf("Warning at frame %d. Velocities for atom %d are large (%g)\n",
 +             frame,i,v[i][j]);
 +    }
 +  }
 +}
 +
 +static void chk_forces(int frame,int natoms,rvec *f)
 +{
 +  int i,j;
 +  
 +  for(i=0; (i<natoms); i++) {
 +    for(j=0; (j<DIM); j++) {
 +      if (fabs(f[i][j]) > 10000)
 +      printf("Warning at frame %d. Forces for atom %d are large (%g)\n",
 +             frame,i,f[i][j]);
 +    }
 +  }
 +}
 +
 +static void chk_bonds(t_idef *idef,int ePBC,rvec *x,matrix box,real tol)
 +{
 +  int  ftype,i,k,ai,aj,type;
 +  real b0,blen,deviation,devtot;
 +  t_pbc pbc;
 +  rvec dx;
 +
 +  devtot = 0;
 +  set_pbc(&pbc,ePBC,box);  
 +  for(ftype=0; (ftype<F_NRE); ftype++) 
 +    if ((interaction_function[ftype].flags & IF_CHEMBOND) == IF_CHEMBOND) {
 +      for(k=0; (k<idef->il[ftype].nr); ) {
 +      type = idef->il[ftype].iatoms[k++];
 +      ai   = idef->il[ftype].iatoms[k++];
 +      aj   = idef->il[ftype].iatoms[k++]; 
 +      b0   = 0;    
 +      switch (ftype) {
 +      case F_BONDS:
 +        b0 = idef->iparams[type].harmonic.rA;
++      break;
++      case F_G96BONDS:
++        b0 = sqrt(idef->iparams[type].harmonic.rA);
 +        break;
 +      case F_MORSE:
 +        b0 = idef->iparams[type].morse.b0A;
 +        break;
 +      case F_CUBICBONDS:
 +        b0 = idef->iparams[type].cubic.b0;
 +        break;
 +      case F_CONSTR:
 +        b0 = idef->iparams[type].constr.dA;
 +        break;
 +      default:
 +        break;
 +      }
 +      if (b0 != 0) {
 +        pbc_dx(&pbc,x[ai],x[aj],dx);
 +        blen = norm(dx);
 +        deviation = sqr(blen-b0);
 +        if (sqrt(deviation/sqr(b0) > tol)) {
 +          fprintf(stderr,"Distance between atoms %d and %d is %.3f, should be %.3f\n",ai+1,aj+1,blen,b0);
 +        }
 +      }
 +      }
 +    }
 +}
 +
 +void chk_trj(const output_env_t oenv,const char *fn,const char *tpr,real tol)
 +{
 +  t_trxframe   fr;
 +  t_count      count;
 +  t_fr_time    first,last;
 +  int          j=-1,new_natoms,natoms;
 +  gmx_off_t    fpos;
 +  real         rdum,tt,old_t1,old_t2,prec;
 +  gmx_bool         bShowTimestep=TRUE,bOK,newline=FALSE;
 +  t_trxstatus *status;
 +  gmx_mtop_t   mtop;
 +  gmx_localtop_t *top=NULL;
 +  t_state      state;
 +  t_inputrec   ir;
 +  
 +  if (tpr) {
 +    read_tpx_state(tpr,&ir,&state,NULL,&mtop);
 +    top = gmx_mtop_generate_local_top(&mtop,&ir);
 +  }
 +  new_natoms = -1;
 +  natoms = -1;  
 +  
 +  printf("Checking file %s\n",fn);
 +  
 +  j      =  0;
 +  old_t2 = -2.0;
 +  old_t1 = -1.0;
 +  fpos   = 0;
 +  
 +  count.bStep = 0;
 +  count.bTime = 0;
 +  count.bLambda = 0;
 +  count.bX = 0;
 +  count.bV = 0;
 +  count.bF = 0;
 +  count.bBox = 0;
 +
 +  first.bStep = 0;
 +  first.bTime = 0;
 +  first.bLambda = 0;
 +  first.bX = 0;
 +  first.bV = 0;
 +  first.bF = 0;
 +  first.bBox = 0;
 +
 +  last.bStep = 0;
 +  last.bTime = 0;
 +  last.bLambda = 0;
 +  last.bX = 0;
 +  last.bV = 0;
 +  last.bF = 0;
 +  last.bBox = 0;
 +
 +  read_first_frame(oenv,&status,fn,&fr,TRX_READ_X | TRX_READ_V | TRX_READ_F);
 +
 +  do {
 +    if (j == 0) {
 +      fprintf(stderr,"\n# Atoms  %d\n",fr.natoms);
 +      if (fr.bPrec)
 +      fprintf(stderr,"Precision %g (nm)\n",1/fr.prec);
 +    }
 +    newline=TRUE;
 +    if ((natoms > 0) && (new_natoms != natoms)) {
 +      fprintf(stderr,"\nNumber of atoms at t=%g don't match (%d, %d)\n",
 +            old_t1,natoms,new_natoms);
 +      newline=FALSE;
 +    }
 +    if (j>=2) {
 +      if ( fabs((fr.time-old_t1)-(old_t1-old_t2)) > 
 +         0.1*(fabs(fr.time-old_t1)+fabs(old_t1-old_t2)) ) {
 +      bShowTimestep=FALSE;
 +      fprintf(stderr,"%sTimesteps at t=%g don't match (%g, %g)\n",
 +              newline?"\n":"",old_t1,old_t1-old_t2,fr.time-old_t1);
 +      }
 +    }
 +    natoms=new_natoms;
 +    if (tpr) {
 +      chk_bonds(&top->idef,ir.ePBC,fr.x,fr.box,tol);
 +    }
 +    if (fr.bX)
 +      chk_coords(j,natoms,fr.x,fr.box,1e5,tol);
 +    if (fr.bV)
 +      chk_vels(j,natoms,fr.v);
 +    if (fr.bF)
 +      chk_forces(j,natoms,fr.f);
 +      
 +    old_t2=old_t1;
 +    old_t1=fr.time;
 +    /*if (fpos && ((j<10 || j%10==0)))
 +      fprintf(stderr," byte: %10lu",(unsigned long)fpos);*/
 +    j++;
 +    new_natoms=fr.natoms;
 +#define INC(s,n,f,l,item) if (s.item != 0) { if (n.item==0) { first.item = fr.time; } last.item = fr.time; n.item++; }
 +    INC(fr,count,first,last,bStep);
 +    INC(fr,count,first,last,bTime);
 +    INC(fr,count,first,last,bLambda);
 +    INC(fr,count,first,last,bX);
 +    INC(fr,count,first,last,bV);
 +    INC(fr,count,first,last,bF);
 +    INC(fr,count,first,last,bBox);
 +#undef INC
 +    fpos = gmx_fio_ftell(trx_get_fileio(status));
 +  } while (read_next_frame(oenv,status,&fr));
 +  
 +  fprintf(stderr,"\n");
 +
 +  close_trj(status);
 +
 +  fprintf(stderr,"\nItem        #frames");
 +  if (bShowTimestep)
 +    fprintf(stderr," Timestep (ps)");
 +  fprintf(stderr,"\n");
 +#define PRINTITEM(label,item) fprintf(stderr,"%-10s  %6d",label,count.item); if ((bShowTimestep) && (count.item > 1)) fprintf(stderr,"    %g\n",(last.item-first.item)/(count.item-1)); else fprintf(stderr,"\n")
 +  PRINTITEM ( "Step",       bStep );
 +  PRINTITEM ( "Time",       bTime );
 +  PRINTITEM ( "Lambda",     bLambda );
 +  PRINTITEM ( "Coords",     bX );
 +  PRINTITEM ( "Velocities", bV );
 +  PRINTITEM ( "Forces",     bF );
 +  PRINTITEM ( "Box",        bBox );
 +}  
 +
 +void chk_tps(const char *fn, real vdw_fac, real bon_lo, real bon_hi)
 +{
 +  int       natom,i,j,k;
 +  char      title[STRLEN];
 +  t_topology top;
 +  int       ePBC;
 +  t_atoms   *atoms;
 +  rvec      *x,*v;
 +  rvec      dx;
 +  matrix    box;
 +  t_pbc     pbc;
 +  gmx_bool      bV,bX,bB,bFirst,bOut;
 +  real      r2,ekin,temp1,temp2,dist2,vdwfac2,bonlo2,bonhi2;
 +  real      *atom_vdw;
 +  gmx_atomprop_t aps;
 +  
 +  fprintf(stderr,"Checking coordinate file %s\n",fn);
 +  read_tps_conf(fn,title,&top,&ePBC,&x,&v,box,TRUE);
 +  atoms=&top.atoms;
 +  natom=atoms->nr;
 +  fprintf(stderr,"%d atoms in file\n",atoms->nr);
 +  
 +  /* check coordinates and box */
 +  bV=FALSE;
 +  bX=FALSE;
 +  for (i=0; (i<natom) && !(bV && bX); i++)
 +    for (j=0; (j<DIM) && !(bV && bX); j++) {
 +      bV=bV || (v[i][j]!=0);
 +      bX=bX || (x[i][j]!=0);
 +    }
 +  bB=FALSE;
 +  for (i=0; (i<DIM) && !bB; i++)
 +    for (j=0; (j<DIM) && !bB; j++)
 +      bB=bB || (box[i][j]!=0);
 +  
 +  fprintf(stderr,"coordinates %s\n",bX?"found":"absent");
 +  fprintf(stderr,"box         %s\n",bB?"found":"absent");
 +  fprintf(stderr,"velocities  %s\n",bV?"found":"absent");
 +  fprintf(stderr,"\n");
 +  
 +  /* check velocities */
 +  if (bV) {
 +    ekin=0.0;
 +    for (i=0; (i<natom); i++)
 +      for (j=0; (j<DIM); j++)
 +      ekin+=0.5*atoms->atom[i].m*v[i][j]*v[i][j];
 +    temp1=(2.0*ekin)/(natom*DIM*BOLTZ); 
 +    temp2=(2.0*ekin)/(natom*(DIM-1)*BOLTZ); 
 +    fprintf(stderr,"Kinetic energy: %g (kJ/mol)\n",ekin);
 +    fprintf(stderr,"Assuming the number of degrees of freedom to be "
 +          "Natoms * %d or Natoms * %d,\n"
 +          "the velocities correspond to a temperature of the system\n"
 +          "of %g K or %g K respectively.\n\n",DIM,DIM-1,temp1,temp2);
 +  }
 +  
 +  /* check coordinates */
 +  if (bX) {
 +    vdwfac2=sqr(vdw_fac);
 +    bonlo2=sqr(bon_lo);
 +    bonhi2=sqr(bon_hi);
 +   
 +    fprintf(stderr,
 +          "Checking for atoms closer than %g and not between %g and %g,\n"
 +          "relative to sum of Van der Waals distance:\n",
 +          vdw_fac,bon_lo,bon_hi);
 +    snew(atom_vdw,natom);
 +    aps = gmx_atomprop_init();
 +    for (i=0; (i<natom); i++) {
 +      gmx_atomprop_query(aps,epropVDW,
 +                       *(atoms->resinfo[atoms->atom[i].resind].name),
 +                       *(atoms->atomname[i]),&(atom_vdw[i]));
 +      if (debug) fprintf(debug,"%5d %4s %4s %7g\n",i+1,
 +                       *(atoms->resinfo[atoms->atom[i].resind].name),
 +                       *(atoms->atomname[i]),
 +                       atom_vdw[i]);
 +    }
 +    gmx_atomprop_destroy(aps);
 +    if (bB) 
 +      set_pbc(&pbc,ePBC,box);
 +      
 +    bFirst=TRUE;
 +    for (i=0; (i<natom); i++) {
 +      if (((i+1)%10)==0)
 +      fprintf(stderr,"\r%5d",i+1);
 +      for (j=i+1; (j<natom); j++) {
 +      if (bB)
 +        pbc_dx(&pbc,x[i],x[j],dx);
 +      else
 +        rvec_sub(x[i],x[j],dx);
 +      r2=iprod(dx,dx);
 +      dist2=sqr(atom_vdw[i]+atom_vdw[j]);
 +      if ( (r2<=dist2*bonlo2) || 
 +           ( (r2>=dist2*bonhi2) && (r2<=dist2*vdwfac2) ) ) {
 +        if (bFirst) {
 +          fprintf(stderr,"\r%5s %4s %8s %5s  %5s %4s %8s %5s  %6s\n",
 +                  "atom#","name","residue","r_vdw",
 +                  "atom#","name","residue","r_vdw","distance");
 +          bFirst=FALSE;
 +        }
 +        fprintf(stderr,
 +                "\r%5d %4s %4s%4d %-5.3g  %5d %4s %4s%4d %-5.3g  %-6.4g\n",
 +                i+1,*(atoms->atomname[i]),
 +                *(atoms->resinfo[atoms->atom[i].resind].name),
 +                atoms->resinfo[atoms->atom[i].resind].nr,
 +                atom_vdw[i],
 +                j+1,*(atoms->atomname[j]),
 +                *(atoms->resinfo[atoms->atom[j].resind].name),
 +                atoms->resinfo[atoms->atom[j].resind].nr,
 +                atom_vdw[j],
 +                sqrt(r2) );
 +      }
 +      }
 +    }
 +    if (bFirst) 
 +      fprintf(stderr,"\rno close atoms found\n");
 +    fprintf(stderr,"\r      \n");
 +    
 +    if (bB) {
 +      /* check box */
 +      bFirst=TRUE;
 +      k=0;
 +      for (i=0; (i<natom) && (k<10); i++) {
 +      bOut=FALSE;
 +      for(j=0; (j<DIM) && !bOut; j++)
 +        bOut = bOut || (x[i][j]<0) || (x[i][j]>box[j][j]);
 +      if (bOut) {
 +        k++;
 +        if (bFirst) {
 +          fprintf(stderr,"Atoms outside box ( ");
 +          for (j=0; (j<DIM); j++)
 +            fprintf(stderr,"%g ",box[j][j]);
 +          fprintf(stderr,"):\n"
 +                  "(These may occur often and are normally not a problem)\n"
 +                  "%5s %4s %8s %5s  %s\n",
 +                  "atom#","name","residue","r_vdw","coordinate");
 +          bFirst=FALSE;
 +        }
 +        fprintf(stderr,
 +                "%5d %4s %4s%4d %-5.3g",
 +                i,*(atoms->atomname[i]),
 +                *(atoms->resinfo[atoms->atom[i].resind].name),
 +                atoms->resinfo[atoms->atom[i].resind].nr,atom_vdw[i]);
 +        for (j=0; (j<DIM); j++)
 +          fprintf(stderr," %6.3g",x[i][j]);
 +        fprintf(stderr,"\n");
 +      }
 +      }
 +      if (k==10)
 +      fprintf(stderr,"(maybe more)\n");
 +      if (bFirst) 
 +      fprintf(stderr,"no atoms found outside box\n");
 +      fprintf(stderr,"\n");
 +    }
 +  }
 +}
 +
 +void chk_ndx(const char *fn)
 +{
 +  t_blocka *grps;
 +  char **grpname;
 +  int  i,j;
 +  
 +  grps = init_index(fn,&grpname);
 +  if (debug)
 +    pr_blocka(debug,0,fn,grps,FALSE);
 +  else {
 +    printf("Contents of index file %s\n",fn);
 +    printf("--------------------------------------------------\n");
 +    printf("Nr.   Group               #Entries   First    Last\n");
 +    for(i=0; (i<grps->nr); i++) {
 +      printf("%4d  %-20s%8d%8d%8d\n",i,grpname[i],
 +           grps->index[i+1]-grps->index[i],
 +           grps->a[grps->index[i]]+1,
 +           grps->a[grps->index[i+1]-1]+1);
 +    }
 +  }
 +  for(i=0; (i<grps->nr); i++) 
 +    sfree(grpname[i]);
 +  sfree(grpname);
 +  done_blocka(grps);
 +}
 +
 +void chk_enx(const char *fn)
 +{
 +  int        nre,fnr,ndr;
 +  ener_file_t in;
 +  gmx_enxnm_t *enm=NULL;
 +  t_enxframe *fr;
 +  gmx_bool       bShowTStep;
 +  real       t0,old_t1,old_t2;
 +  char       buf[22];
 +  
 +  fprintf(stderr,"Checking energy file %s\n\n",fn);
 +
 +  in = open_enx(fn,"r");
 +  do_enxnms(in,&nre,&enm);
 +  fprintf(stderr,"%d groups in energy file",nre);
 +  snew(fr,1);
 +  old_t2=-2.0;
 +  old_t1=-1.0;
 +  fnr=0;
 +  t0=NOTSET;
 +  bShowTStep=TRUE;
 +
 +  while (do_enx(in,fr)) {
 +    if (fnr>=2) {
 +      if ( fabs((fr->t-old_t1)-(old_t1-old_t2)) > 
 +         0.1*(fabs(fr->t-old_t1)+fabs(old_t1-old_t2)) ) {
 +      bShowTStep=FALSE;
 +      fprintf(stderr,"\nTimesteps at t=%g don't match (%g, %g)\n",
 +              old_t1,old_t1-old_t2,fr->t-old_t1);
 +      }
 +    }
 +    old_t2=old_t1;
 +    old_t1=fr->t;
 +    if (t0 == NOTSET) t0=fr->t;
 +    if (fnr == 0)
 +      fprintf(stderr,"\rframe: %6s (index %6d), t: %10.3f\n",
 +            gmx_step_str(fr->step,buf),fnr,fr->t);
 +    fnr++;
 +  }
 +  fprintf(stderr,"\n\nFound %d frames",fnr);
 +  if (bShowTStep && fnr>1)
 +    fprintf(stderr," with a timestep of %g ps",(old_t1-t0)/(fnr-1));
 +  fprintf(stderr,".\n");
 +
 +  free_enxframe(fr);
 +  free_enxnms(nre,enm);
 +  sfree(fr);
 +}
 +
 +int cmain(int argc,char *argv[])
 +{
 +  const char *desc[] = {
 +    "[TT]gmxcheck[tt] reads a trajectory ([TT].trj[tt], [TT].trr[tt] or ",
 +    "[TT].xtc[tt]), an energy file ([TT].ene[tt] or [TT].edr[tt])",
 +    "or an index file ([TT].ndx[tt])",
 +    "and prints out useful information about them.[PAR]",
 +    "Option [TT]-c[tt] checks for presence of coordinates,",
 +    "velocities and box in the file, for close contacts (smaller than",
 +    "[TT]-vdwfac[tt] and not bonded, i.e. not between [TT]-bonlo[tt]",
 +    "and [TT]-bonhi[tt], all relative to the sum of both Van der Waals",
 +    "radii) and atoms outside the box (these may occur often and are",
 +    "no problem). If velocities are present, an estimated temperature",
 +    "will be calculated from them.[PAR]",
 +    "If an index file, is given its contents will be summarized.[PAR]",
 +    "If both a trajectory and a [TT].tpr[tt] file are given (with [TT]-s1[tt])",
 +    "the program will check whether the bond lengths defined in the tpr",
 +    "file are indeed correct in the trajectory. If not you may have",
 +    "non-matching files due to e.g. deshuffling or due to problems with",
 +    "virtual sites. With these flags, [TT]gmxcheck[tt] provides a quick check for such problems.[PAR]",
 +    "The program can compare two run input ([TT].tpr[tt], [TT].tpb[tt] or",
 +    "[TT].tpa[tt]) files",
 +    "when both [TT]-s1[tt] and [TT]-s2[tt] are supplied.",
 +    "Similarly a pair of trajectory files can be compared (using the [TT]-f2[tt]",
 +    "option), or a pair of energy files (using the [TT]-e2[tt] option).[PAR]",
 +    "For free energy simulations the A and B state topology from one",
 +    "run input file can be compared with options [TT]-s1[tt] and [TT]-ab[tt].[PAR]",
 +    "In case the [TT]-m[tt] flag is given a LaTeX file will be written",
 +    "consisting of a rough outline for a methods section for a paper."
 +  };
 +  t_filenm fnm[] = {
 +    { efTRX, "-f",  NULL, ffOPTRD },
 +    { efTRX, "-f2",  NULL, ffOPTRD },
 +    { efTPX, "-s1", "top1", ffOPTRD },
 +    { efTPX, "-s2", "top2", ffOPTRD },
 +    { efTPS, "-c",  NULL, ffOPTRD },
 +    { efEDR, "-e",  NULL, ffOPTRD },
 +    { efEDR, "-e2", "ener2", ffOPTRD },
 +    { efNDX, "-n",  NULL, ffOPTRD },
 +    { efTEX, "-m",  NULL, ffOPTWR }
 +  };
 +#define NFILE asize(fnm)
 +  const char *fn1=NULL,*fn2=NULL,*tex=NULL;
 + 
 +  output_env_t oenv;
 +  static real vdw_fac=0.8;
 +  static real bon_lo=0.4;
 +  static real bon_hi=0.7;
 +  static gmx_bool bRMSD=FALSE;
 +  static real ftol=0.001;
 +  static real abstol=0.001;
 +  static gmx_bool bCompAB=FALSE;
 +  static char *lastener=NULL;
 +  static t_pargs pa[] = {
 +    { "-vdwfac", FALSE, etREAL, {&vdw_fac},
 +      "Fraction of sum of VdW radii used as warning cutoff" },
 +    { "-bonlo",  FALSE, etREAL, {&bon_lo},
 +      "Min. fract. of sum of VdW radii for bonded atoms" },
 +    { "-bonhi",  FALSE, etREAL, {&bon_hi},
 +      "Max. fract. of sum of VdW radii for bonded atoms" },
 +    { "-rmsd",   FALSE, etBOOL, {&bRMSD},
 +      "Print RMSD for x, v and f" },
 +    { "-tol",    FALSE, etREAL, {&ftol},
 +      "Relative tolerance for comparing real values defined as [MATH]2*(a-b)/([MAG]a[mag]+[MAG]b[mag])[math]" },
 +    { "-abstol",    FALSE, etREAL, {&abstol},
 +      "Absolute tolerance, useful when sums are close to zero." },
 +    { "-ab",     FALSE, etBOOL, {&bCompAB},
 +      "Compare the A and B topology from one file" }, 
 +    { "-lastener",FALSE, etSTR,  {&lastener},
 +      "Last energy term to compare (if not given all are tested). It makes sense to go up until the Pressure." }
 +  };
 +
 +  CopyRight(stderr,argv[0]);
 +  parse_common_args(&argc,argv,0,NFILE,fnm,asize(pa),pa,
 +                  asize(desc),desc,0,NULL,&oenv);
 +
 +  fn1 = opt2fn_null("-f",NFILE,fnm);
 +  fn2 = opt2fn_null("-f2",NFILE,fnm);
 +  tex = opt2fn_null("-m",NFILE,fnm);
 +  if (fn1 && fn2)
 +    comp_trx(oenv,fn1,fn2,bRMSD,ftol,abstol);
 +  else if (fn1)
 +    chk_trj(oenv,fn1,opt2fn_null("-s1",NFILE,fnm),ftol);
 +  else if (fn2)
 +    fprintf(stderr,"Please give me TWO trajectory (.xtc/.trr/.trj) files!\n");
 +  
 +  fn1 = opt2fn_null("-s1",NFILE,fnm);
 +  fn2 = opt2fn_null("-s2",NFILE,fnm);
 +  if ((fn1 && fn2) || bCompAB) {
 +    if (bCompAB) {
 +      if (fn1 == NULL)
 +      gmx_fatal(FARGS,"With -ab you need to set the -s1 option");
 +      fn2 = NULL;
 +    }
 +    comp_tpx(fn1,fn2,bRMSD,ftol,abstol);
 +  } else if (fn1 && tex)
 +    tpx2methods(fn1,tex);
 +  else if ((fn1 && !opt2fn_null("-f",NFILE,fnm)) || (!fn1 && fn2)) {
 +    fprintf(stderr,"Please give me TWO run input (.tpr/.tpa/.tpb) files\n"
 +          "or specify the -m flag to generate a methods.tex file\n");
 +  }
 +  
 +  fn1 = opt2fn_null("-e",NFILE,fnm);
 +  fn2 = opt2fn_null("-e2",NFILE,fnm);
 +  if (fn1 && fn2)
 +    comp_enx(fn1,fn2,ftol,abstol,lastener);
 +  else if (fn1)
 +    chk_enx(ftp2fn(efEDR,NFILE,fnm));
 +  else if (fn2)
 +    fprintf(stderr,"Please give me TWO energy (.edr/.ene) files!\n");
 +  
 +  if (ftp2bSet(efTPS,NFILE,fnm))
 +    chk_tps(ftp2fn(efTPS,NFILE,fnm), vdw_fac, bon_lo, bon_hi);
 +  
 +  if (ftp2bSet(efNDX,NFILE,fnm))
 +    chk_ndx(ftp2fn(efNDX,NFILE,fnm));
 +    
 +  thanx(stderr);
 +  
 +  return 0;
 +}
index 0de30812b6ff0f01d657ce7e24be24d5313e5c26,0000000000000000000000000000000000000000..a90da900756efebbace9a2697c481af4f1e509bc
mode 100644,000000..100644
--- /dev/null
@@@ -1,538 -1,0 +1,561 @@@
- static void assign_param(t_functype ftype,t_iparams *newparam,
 +/*
 + * 
 + *                This source code is part of
 + * 
 + *                 G   R   O   M   A   C   S
 + * 
 + *          GROningen MAchine for Chemical Simulations
 + * 
 + *                        VERSION 3.2.0
 + * Written by David van der Spoel, Erik Lindahl, Berk Hess, and others.
 + * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 + * Copyright (c) 2001-2004, The GROMACS development team,
 + * check out http://www.gromacs.org for more information.
 +
 + * This program is free software; you can redistribute it and/or
 + * modify it under the terms of the GNU General Public License
 + * as published by the Free Software Foundation; either version 2
 + * of the License, or (at your option) any later version.
 + * 
 + * If you want to redistribute modifications, please consider that
 + * scientific software is very special. Version control is crucial -
 + * bugs must be traceable. We will be happy to consider code for
 + * inclusion in the official distribution, but derived work must not
 + * be called official GROMACS. Details are found in the README & COPYING
 + * files - if they are missing, get the official version at www.gromacs.org.
 + * 
 + * To help us fund GROMACS development, we humbly ask that you cite
 + * the papers on the package - you can find them in the top README file.
 + * 
 + * For more info, check our website at http://www.gromacs.org
 + * 
 + * And Hey:
 + * Gallium Rubidium Oxygen Manganese Argon Carbon Silicon
 + */
 +/* This file is completely threadsafe - keep it that way! */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +#include "sysstuff.h"
 +#include "physics.h"
 +#include "vec.h"
 +#include "smalloc.h"
 +#include "typedefs.h"
 +#include "gmx_fatal.h"
 +#include "topio.h"
 +#include "toputil.h"
 +#include "convparm.h"
 +#include "names.h"
 +#include "gpp_atomtype.h"
 +#include "maths.h"
 +
 +static int round_check(real r,int limit,int ftype,const char *name)
 +{
 +  int i;
 +
 +  if (r >= 0)
 +    i = (int)(r + 0.5);
 +  else
 +    i = (int)(r - 0.5);
 +
 +  if (r-i > 0.01 || r-i < -0.01)
 +    gmx_fatal(FARGS,"A non-integer value (%f) was supplied for '%s' in %s",
 +            r,name,interaction_function[ftype].longname);
 +
 +  if (i < limit)
 +    gmx_fatal(FARGS,"Value of '%s' in %s is %d, which is smaller than the minimum of %d",
 +            name,interaction_function[ftype].longname,i,limit);
 +
 +  return i;
 +}
 +
 +static void set_ljparams(int comb,double reppow,real v,real w,
 +                       real *c6,real *c12)
 +{
 +  if (comb == eCOMB_ARITHMETIC || comb == eCOMB_GEOM_SIG_EPS) {
 +    if (v >= 0) {
 +      *c6  = 4*w*pow(v,6.0);
 +      *c12 = 4*w*pow(v,reppow);
 +    } else {
 +      /* Interpret negative sigma as c6=0 and c12 with -sigma */
 +      *c6  = 0;
 +      *c12 = 4*w*pow(-v,reppow);
 +    }
 +  } else {
 +    *c6  = v;
 +    *c12 = w;
 +  }
 +}
 +
-     {
++/* A return value of 0 means parameters were assigned successfully,
++ * returning -1 means this is an all-zero interaction that should not be added.
++ */
++static int
++assign_param(t_functype ftype,t_iparams *newparam,
 +                       real old[MAXFORCEPARAM],int comb,double reppow)
 +{
 +  int  i,j;
 +  real tmp;
++  gmx_bool all_param_zero=TRUE;
 +
 +  /* Set to zero */
 +  for(j=0; (j<MAXFORCEPARAM); j++) 
-     }
++  {
 +      newparam->generic.buf[j]=0.0;
-     newparam->pdihs.phiA = old[0];
-     newparam->pdihs.cpA  = old[1];
-                 
-     /* Dont do any checks if all parameters are zero (such interactions will be removed).
-      * Change 20100720: Amber occasionally uses negative multiplicities (mathematically OK),
-      * so I have changed the lower limit to -99 /EL
-      *
-      * Second, if the force constant is zero in both A and B states, we set the phase
-      * and multiplicity to zero too so the interaction gets removed during clean-up.
-      */       
-     newparam->pdihs.phiB = old[3];
-     newparam->pdihs.cpB  = old[4];
-           
-     if( fabs(newparam->pdihs.cpA) < GMX_REAL_MIN && fabs(newparam->pdihs.cpB) < GMX_REAL_MIN )
-     {
-         newparam->pdihs.phiA = 0.0; 
-         newparam->pdihs.phiB = 0.0; 
-         newparam->pdihs.mult = 0; 
-     } 
-     else
-     {
-         newparam->pdihs.mult = round_check(old[2],-99,ftype,"multiplicity");
-     }
++      /* If all parameters are zero we might not add some interaction types (selected below).
++       * We cannot apply this to ALL interactions, since many have valid reasons for having
++       * zero parameters (e.g. an index to a Cmap interaction, or LJ parameters), but
++       * we use it for angles and torsions that are typically generated automatically.
++       */
++      all_param_zero = (all_param_zero==TRUE) && fabs(old[j])<GMX_REAL_MIN;
++  }
++
++  if(all_param_zero==TRUE)
++  {
++      if(IS_ANGLE(ftype) || IS_RESTRAINT_TYPE(ftype) || ftype==F_IDIHS ||
++         ftype==F_PDIHS || ftype==F_PIDIHS || ftype==F_RBDIHS || ftype==F_FOURDIHS)
++      {
++          return -1;
++      }
++  }
++
 +  switch (ftype) {
 +  case F_G96ANGLES:
 +    /* Post processing of input data: store cosine iso angle itself */
 +    newparam->harmonic.rA =cos(old[0]*DEG2RAD);
 +    newparam->harmonic.krA=old[1];
 +    newparam->harmonic.rB =cos(old[2]*DEG2RAD);
 +    newparam->harmonic.krB=old[3];
 +    break;
 +  case F_G96BONDS:
 +    /* Post processing of input data: store square of length itself */
 +    newparam->harmonic.rA =sqr(old[0]);
 +    newparam->harmonic.krA=old[1];
 +    newparam->harmonic.rB =sqr(old[2]);
 +    newparam->harmonic.krB=old[3];
 +    break;
 +  case F_FENEBONDS:
 +    newparam->fene.bm=old[0];
 +    newparam->fene.kb=old[1];
 +    break;
 +  case F_RESTRBONDS:
 +    newparam->restraint.lowA = old[0];
 +    newparam->restraint.up1A = old[1];
 +    newparam->restraint.up2A = old[2];
 +    newparam->restraint.kA   = old[3];
 +    newparam->restraint.lowB = old[4];
 +    newparam->restraint.up1B = old[5];
 +    newparam->restraint.up2B = old[6];
 +    newparam->restraint.kB   = old[7];
 +    break;
 +  case F_TABBONDS:
 +  case F_TABBONDSNC:
 +  case F_TABANGLES:
 +  case F_TABDIHS:
 +    newparam->tab.table = round_check(old[0],0,ftype,"table index");
 +    newparam->tab.kA    = old[1];
 +    newparam->tab.kB    = old[3];
 +    break;
 +  case F_CROSS_BOND_BONDS:
 +    newparam->cross_bb.r1e=old[0];
 +    newparam->cross_bb.r2e=old[1];
 +    newparam->cross_bb.krr=old[2];
 +    break;
 +  case F_CROSS_BOND_ANGLES:
 +    newparam->cross_ba.r1e=old[0];
 +    newparam->cross_ba.r2e=old[1];
 +    newparam->cross_ba.r3e=old[2];
 +    newparam->cross_ba.krt=old[3];
 +    break;
 +  case F_UREY_BRADLEY:
 +    newparam->u_b.thetaA=old[0];
 +    newparam->u_b.kthetaA=old[1];
 +    newparam->u_b.r13A=old[2];
 +    newparam->u_b.kUBA=old[3];
 +    newparam->u_b.thetaB=old[4];
 +    newparam->u_b.kthetaB=old[5];
 +    newparam->u_b.r13B=old[6];
 +    newparam->u_b.kUBB=old[7];
 +    break;
 +  case F_QUARTIC_ANGLES:
 +    newparam->qangle.theta=old[0];
 +    for(i=0; i<5; i++)
 +      newparam->qangle.c[i]=old[i+1];
 +    break;
 +  case F_LINEAR_ANGLES:
 +    newparam->linangle.aA    = old[0];
 +    newparam->linangle.klinA = old[1];
 +    newparam->linangle.aB    = old[2];
 +    newparam->linangle.klinB = old[3];
 +    break;
 +  case F_BONDS:
 +  case F_ANGLES:
 +  case F_HARMONIC:
 +  case F_IDIHS:
 +    newparam->harmonic.rA =old[0];
 +    newparam->harmonic.krA=old[1];
 +    newparam->harmonic.rB =old[2];
 +    newparam->harmonic.krB=old[3];
 +    break;
 +  case F_MORSE:
 +    newparam->morse.b0A    =old[0];
 +    newparam->morse.cbA    =old[1];
 +    newparam->morse.betaA  =old[2];
 +    newparam->morse.b0B    =old[3];
 +    newparam->morse.cbB    =old[4];
 +    newparam->morse.betaB  =old[5];
 +    break;
 +  case F_CUBICBONDS:
 +    newparam->cubic.b0    =old[0];
 +    newparam->cubic.kb    =old[1];
 +    newparam->cubic.kcub  =old[2];
 +    break;
 +  case F_CONNBONDS:
 +    break;
 +  case F_POLARIZATION:
 +    newparam->polarize.alpha = old[0];
 +    break;
 +  case F_ANHARM_POL:
 +    newparam->anharm_polarize.alpha = old[0];
 +    newparam->anharm_polarize.drcut = old[1];
 +    newparam->anharm_polarize.khyp  = old[2];
 +    break;
 +  case F_WATER_POL:
 +    newparam->wpol.al_x   =old[0];
 +    newparam->wpol.al_y   =old[1];
 +    newparam->wpol.al_z   =old[2];
 +    newparam->wpol.rOH    =old[3];
 +    newparam->wpol.rHH    =old[4];
 +    newparam->wpol.rOD    =old[5];
 +    break;
 +  case F_THOLE_POL:
 +    newparam->thole.a      = old[0];
 +    newparam->thole.alpha1 = old[1];
 +    newparam->thole.alpha2 = old[2];
 +    if ((old[1] > 0) && (old[2] > 0))
 +      newparam->thole.rfac = old[0]*pow(old[1]*old[2],-1.0/6.0);
 +    else
 +      newparam->thole.rfac = 1;
 +    break;
 +  case F_BHAM:
 +    newparam->bham.a = old[0];
 +    newparam->bham.b = old[1];
 +    newparam->bham.c = old[2];
 +    break;
 +  case F_LJ14:
 +    set_ljparams(comb,reppow,old[0],old[1],&newparam->lj14.c6A,&newparam->lj14.c12A);
 +    set_ljparams(comb,reppow,old[2],old[3],&newparam->lj14.c6B,&newparam->lj14.c12B);
 +    break;
 +  case F_LJC14_Q:
 +    newparam->ljc14.fqq = old[0];
 +    newparam->ljc14.qi  = old[1];
 +    newparam->ljc14.qj  = old[2];
 +    set_ljparams(comb,reppow,old[3],old[4],&newparam->ljc14.c6,&newparam->ljc14.c12);
 +    break;
 +  case F_LJC_PAIRS_NB:
 +    newparam->ljcnb.qi = old[0];
 +    newparam->ljcnb.qj = old[1];
 +    set_ljparams(comb,reppow,old[2],old[3],&newparam->ljcnb.c6,&newparam->ljcnb.c12);
 +    break;
 +  case F_LJ:
 +    set_ljparams(comb,reppow,old[0],old[1],&newparam->lj.c6,&newparam->lj.c12);
 +    break;
 +  case F_PDIHS:
 +  case F_PIDIHS:
 +  case F_ANGRES:
 +  case F_ANGRESZ:
-     break;    
++          newparam->pdihs.phiA = old[0];
++          newparam->pdihs.cpA  = old[1];
++
++          /* Change 20100720: Amber occasionally uses negative multiplicities (mathematically OK),
++           * so I have changed the lower limit to -99 /EL
++           */
++          newparam->pdihs.phiB = old[3];
++          newparam->pdihs.cpB  = old[4];
++          /* If both force constants are zero there is no interaction. Return -1 to signal
++           * this entry should NOT be added.
++           */
++          if( fabs(newparam->pdihs.cpA) < GMX_REAL_MIN && fabs(newparam->pdihs.cpB) < GMX_REAL_MIN )
++          {
++              return -1;
++          }
++    
++          newparam->pdihs.mult = round_check(old[2],-99,ftype,"multiplicity");
 +          
 +    break;
 +  case F_POSRES:
 +    newparam->posres.fcA[XX]   = old[0];
 +    newparam->posres.fcA[YY]   = old[1];
 +    newparam->posres.fcA[ZZ]   = old[2];
 +    newparam->posres.fcB[XX]   = old[3];
 +    newparam->posres.fcB[YY]   = old[4];
 +    newparam->posres.fcB[ZZ]   = old[5];
 +    newparam->posres.pos0A[XX] = old[6];
 +    newparam->posres.pos0A[YY] = old[7];
 +    newparam->posres.pos0A[ZZ] = old[8];
 +    newparam->posres.pos0B[XX] = old[9];
 +    newparam->posres.pos0B[YY] = old[10];
 +    newparam->posres.pos0B[ZZ] = old[11];
 +    break;
 +  case F_FBPOSRES:
 +    newparam->fbposres.geom     = round_check(old[0],0,ftype,"geometry");
 +    if ( ! (newparam->fbposres.geom > efbposresZERO && newparam->fbposres.geom < efbposresNR))
 +    {
 +      gmx_fatal(FARGS,"Invalid geometry for flat-bottomed position restraint.\n"
 +              "Expected number between 1 and %d. Found %d\n", efbposresNR-1,
 +              newparam->fbposres.geom);
 +    }
 +    newparam->fbposres.r        = old[1];
 +    newparam->fbposres.k        = old[2];
 +    newparam->fbposres.pos0[XX] = old[3];
 +    newparam->fbposres.pos0[YY] = old[4];
 +    newparam->fbposres.pos0[ZZ] = old[5];
 +    break;
 +  case F_DISRES:
 +    newparam->disres.label = round_check(old[0],0,ftype,"label");
 +    newparam->disres.type  = round_check(old[1],1,ftype,"type'");
 +    newparam->disres.low   = old[2];
 +    newparam->disres.up1   = old[3];
 +    newparam->disres.up2   = old[4];
 +    newparam->disres.kfac  = old[5];
 +    break;
 +  case F_ORIRES:
 +    newparam->orires.ex    = round_check(old[0],1,ftype,"experiment") - 1;
 +    newparam->orires.label = round_check(old[1],1,ftype,"label");
 +    newparam->orires.power = round_check(old[2],0,ftype,"power");
 +    newparam->orires.c     = old[3];
 +    newparam->orires.obs   = old[4];
 +    newparam->orires.kfac  = old[5];
 +    break;
 +  case F_DIHRES:
 +    newparam->dihres.phiA  = old[0];
 +    newparam->dihres.dphiA = old[1];
 +    newparam->dihres.kfacA = old[2];
 +    newparam->dihres.phiB   = old[3];
 +    newparam->dihres.dphiB  = old[4];
 +    newparam->dihres.kfacB  = old[5];
 +    break;
 +  case F_RBDIHS:
 +    for (i=0; (i<NR_RBDIHS); i++) {
 +      newparam->rbdihs.rbcA[i]=old[i]; 
 +      newparam->rbdihs.rbcB[i]=old[NR_RBDIHS+i]; 
 +    }
 +    break;
 +  case F_FOURDIHS:
 +    /* Read the dihedral parameters to temporary arrays,
 +     * and convert them to the computationally faster
 +     * Ryckaert-Bellemans form.
 +     */   
 +    /* Use conversion formula for OPLS to Ryckaert-Bellemans: */
 +    newparam->rbdihs.rbcA[0]=old[1]+0.5*(old[0]+old[2]);
 +    newparam->rbdihs.rbcA[1]=0.5*(3.0*old[2]-old[0]);
 +    newparam->rbdihs.rbcA[2]=4.0*old[3]-old[1];
 +    newparam->rbdihs.rbcA[3]=-2.0*old[2];
 +    newparam->rbdihs.rbcA[4]=-4.0*old[3];
 +    newparam->rbdihs.rbcA[5]=0.0;
 +
 +    newparam->rbdihs.rbcB[0]=old[NR_FOURDIHS+1]+0.5*(old[NR_FOURDIHS+0]+old[NR_FOURDIHS+2]);
 +    newparam->rbdihs.rbcB[1]=0.5*(3.0*old[NR_FOURDIHS+2]-old[NR_FOURDIHS+0]);
 +    newparam->rbdihs.rbcB[2]=4.0*old[NR_FOURDIHS+3]-old[NR_FOURDIHS+1];
 +    newparam->rbdihs.rbcB[3]=-2.0*old[NR_FOURDIHS+2];
 +    newparam->rbdihs.rbcB[4]=-4.0*old[NR_FOURDIHS+3];
 +    newparam->rbdihs.rbcB[5]=0.0;
-   
-   assign_param(ftype,&newparam,forceparams,comb,reppow);
++    break;
 +  case F_CONSTR:
 +  case F_CONSTRNC:
 +    newparam->constr.dA = old[0];
 +    newparam->constr.dB = old[1];
 +    break;
 +  case F_SETTLE:
 +    newparam->settle.doh=old[0];
 +    newparam->settle.dhh=old[1];
 +    break;
 +  case F_VSITE2:
 +  case F_VSITE3:
 +  case F_VSITE3FD:
 +  case F_VSITE3OUT:
 +  case F_VSITE4FD:
 +  case F_VSITE4FDN:
 +    newparam->vsite.a=old[0];
 +    newparam->vsite.b=old[1];
 +    newparam->vsite.c=old[2];
 +    newparam->vsite.d=old[3];
 +    newparam->vsite.e=old[4];
 +    newparam->vsite.f=old[5];
 +    break;
 +  case F_VSITE3FAD:
 +    newparam->vsite.a=old[1] * cos(DEG2RAD * old[0]);
 +    newparam->vsite.b=old[1] * sin(DEG2RAD * old[0]);
 +    newparam->vsite.c=old[2];
 +    newparam->vsite.d=old[3];
 +    newparam->vsite.e=old[4];
 +    newparam->vsite.f=old[5];
 +    break;
 +  case F_VSITEN:
 +    newparam->vsiten.n = round_check(old[0],1,ftype,"number of atoms");
 +    newparam->vsiten.a = old[1];
 +    break;
 +  case F_CMAP:
 +    newparam->cmap.cmapA=old[0];
 +    newparam->cmap.cmapB=old[1];
 +    break;
 +  case F_GB12:
 +  case F_GB13:
 +  case F_GB14:
 +    newparam->gb.sar  = old[0];
 +    newparam->gb.st   = old[1];
 +    newparam->gb.pi   = old[2];
 +    newparam->gb.gbr  = old[3];
 +    newparam->gb.bmlt = old[4];
 +    break;
 +  default:
 +    gmx_fatal(FARGS,"unknown function type %d in %s line %d",
 +            ftype,__FILE__,__LINE__);
 +  }
++    return 0;
 +}
 +
 +static int enter_params(gmx_ffparams_t *ffparams, t_functype ftype,
 +                      real forceparams[MAXFORCEPARAM],int comb,real reppow,
 +                      int start,gmx_bool bAppend)
 +{
 +  t_iparams newparam;
 +  int       type;
-     if (!bNB) {
++  int       rc;
++
++  if( (rc=assign_param(ftype,&newparam,forceparams,comb,reppow))<0 )
++  {
++      /* -1 means this interaction is all-zero and should not be added */
++      return rc;
++  }
++
 +  if (!bAppend) {
 +    for (type=start; (type<ffparams->ntypes); type++) {
 +      if (ffparams->functype[type]==ftype) {
 +      if (F_GB13 == ftype) {
 +        /* Occasionally, the way the 1-3 reference distance is
 +         * computed can lead to non-binary-identical results, but I
 +         * don't know why. */
 +        if ((gmx_within_tol(newparam.gb.sar,  ffparams->iparams[type].gb.sar,  1e-6)) &&
 +            (gmx_within_tol(newparam.gb.st,   ffparams->iparams[type].gb.st,   1e-6)) &&
 +            (gmx_within_tol(newparam.gb.pi,   ffparams->iparams[type].gb.pi,   1e-6)) &&
 +            (gmx_within_tol(newparam.gb.gbr,  ffparams->iparams[type].gb.gbr,  1e-6)) &&
 +            (gmx_within_tol(newparam.gb.bmlt, ffparams->iparams[type].gb.bmlt, 1e-6))) {
 +          return type;
 +        }
 +      }
 +      else {
 +      if (memcmp(&newparam,&ffparams->iparams[type],(size_t)sizeof(newparam)) == 0)
 +        return type;
 +      }
 +      }
 +    }
 +  }
 +  else {
 +    type = ffparams->ntypes;
 +  }
 +  if (debug)
 +    fprintf(debug,"copying newparam to ffparams->iparams[%d] (ntypes=%d)\n",
 +          type,ffparams->ntypes);
 +  memcpy(&ffparams->iparams[type],&newparam,(size_t)sizeof(newparam));
 +  
 +  ffparams->ntypes++;
 +  ffparams->functype[type]=ftype;
 +
 +  return type;
 +}
 +
 +static void append_interaction(t_ilist *ilist,
 +                               int type,int nral,atom_id a[MAXATOMLIST])
 +{
 +  int i,where1;
 +  
 +  where1     = ilist->nr;
 +  ilist->nr += nral+1;
 +
 +  ilist->iatoms[where1++]=type;
 +  for (i=0; (i<nral); i++) 
 +    ilist->iatoms[where1++]=a[i];
 +}
 +
 +static void enter_function(t_params *p,t_functype ftype,int comb,real reppow,
 +                           gmx_ffparams_t *ffparams,t_ilist *il,
 +                         int *maxtypes,
 +                         gmx_bool bNB,gmx_bool bAppend)
 +{
 +  int     k,type,nr,nral,delta,start;
 +  
 +  start = ffparams->ntypes;
 +  nr    = p->nr;
 +  
 +  for (k=0; k<nr; k++) {
 +    if (*maxtypes <= ffparams->ntypes) {
 +      *maxtypes += 1000;
 +      srenew(ffparams->functype,*maxtypes);
 +      srenew(ffparams->iparams, *maxtypes);
 +      if (debug) 
 +      fprintf(debug,"%s, line %d: srenewed idef->functype and idef->iparams to %d\n",
 +              __FILE__,__LINE__,*maxtypes);
 +    }
 +    type = enter_params(ffparams,ftype,p->param[k].c,comb,reppow,start,bAppend);
++    /* Type==-1 is used as a signal that this interaction is all-zero and should not be added. */
++    if (!bNB && type>=0) {
 +      nral  = NRAL(ftype);
 +      delta = nr*(nral+1);
 +      srenew(il->iatoms,il->nr+delta);
 +      append_interaction(il,type,nral,p->param[k].a);
 +    }
 +  }
 +}
 +
 +void convert_params(int atnr,t_params nbtypes[],
 +                  t_molinfo *mi,int comb,double reppow,real fudgeQQ,
 +                  gmx_mtop_t *mtop)
 +{
 +  int    i,j,maxtypes,mt;
 +  unsigned long  flags;
 +  gmx_ffparams_t *ffp;
 +  gmx_moltype_t *molt;
 +  t_params *plist;
 +
 +  maxtypes=0;
 +  
 +  ffp = &mtop->ffparams;
 +  ffp->ntypes   = 0;
 +  ffp->atnr     = atnr;
 +  ffp->functype = NULL;
 +  ffp->iparams  = NULL;
 +  ffp->reppow   = reppow;
 +
 +  enter_function(&(nbtypes[F_LJ]),  (t_functype)F_LJ,    comb,reppow,ffp,NULL,
 +               &maxtypes,TRUE,TRUE);
 +  enter_function(&(nbtypes[F_BHAM]),(t_functype)F_BHAM,  comb,reppow,ffp,NULL,
 +               &maxtypes,TRUE,TRUE);
 +
 +  for(mt=0; mt<mtop->nmoltype; mt++) {
 +    molt = &mtop->moltype[mt];
 +    for(i=0; (i<F_NRE); i++) {
 +      molt->ilist[i].nr     = 0;
 +      molt->ilist[i].iatoms = NULL;
 +      
 +      plist = mi[mt].plist;
 +
 +      flags = interaction_function[i].flags;
 +      if ((i != F_LJ) && (i != F_BHAM) && ((flags & IF_BOND) ||
 +                                         (flags & IF_VSITE) ||
 +                                         (flags & IF_CONSTRAINT))) {
 +      enter_function(&(plist[i]),(t_functype)i,comb,reppow,
 +                     ffp,&molt->ilist[i],
 +                     &maxtypes,FALSE,(i == F_POSRES  || i == F_FBPOSRES));
 +      }
 +    }
 +  }
 +  if (debug) {
 +    fprintf(debug,"%s, line %d: There are %d functypes in idef\n",
 +          __FILE__,__LINE__,ffp->ntypes);
 +  }
 +
 +  ffp->fudgeQQ = fudgeQQ;
 +}
index 386b897e322ff58a7c38adb53ed2bcb644c979b3,0000000000000000000000000000000000000000..da949ade755c10c105406c0d5d0a9a81a2fb5a99
mode 100644,000000..100644
--- /dev/null
@@@ -1,2147 -1,0 +1,2153 @@@
-     if (repl_ex_nst > 0 && repl_ex_nst > nstfep)
 +/* -*- mode: c; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4; c-file-style: "stroustrup"; -*-
 + *
 + * 
 + *                This source code is part of
 + * 
 + *                 G   R   O   M   A   C   S
 + * 
 + *          GROningen MAchine for Chemical Simulations
 + * 
 + *                        VERSION 3.2.0
 + * Written by David van der Spoel, Erik Lindahl, Berk Hess, and others.
 + * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 + * Copyright (c) 2001-2004, The GROMACS development team,
 + * check out http://www.gromacs.org for more information.
 +
 + * This program is free software; you can redistribute it and/or
 + * modify it under the terms of the GNU General Public License
 + * as published by the Free Software Foundation; either version 2
 + * of the License, or (at your option) any later version.
 + * 
 + * If you want to redistribute modifications, please consider that
 + * scientific software is very special. Version control is crucial -
 + * bugs must be traceable. We will be happy to consider code for
 + * inclusion in the official distribution, but derived work must not
 + * be called official GROMACS. Details are found in the README & COPYING
 + * files - if they are missing, get the official version at www.gromacs.org.
 + * 
 + * To help us fund GROMACS development, we humbly ask that you cite
 + * the papers on the package - you can find them in the top README file.
 + * 
 + * For more info, check our website at http://www.gromacs.org
 + * 
 + * And Hey:
 + * Gallium Rubidium Oxygen Manganese Argon Carbon Silicon
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include "typedefs.h"
 +#include "smalloc.h"
 +#include "sysstuff.h"
 +#include "vec.h"
 +#include "statutil.h"
 +#include "vcm.h"
 +#include "mdebin.h"
 +#include "nrnb.h"
 +#include "calcmu.h"
 +#include "index.h"
 +#include "vsite.h"
 +#include "update.h"
 +#include "ns.h"
 +#include "trnio.h"
 +#include "xtcio.h"
 +#include "mdrun.h"
 +#include "md_support.h"
 +#include "md_logging.h"
 +#include "confio.h"
 +#include "network.h"
 +#include "pull.h"
 +#include "xvgr.h"
 +#include "physics.h"
 +#include "names.h"
 +#include "xmdrun.h"
 +#include "ionize.h"
 +#include "disre.h"
 +#include "orires.h"
 +#include "pme.h"
 +#include "mdatoms.h"
 +#include "repl_ex.h"
 +#include "qmmm.h"
 +#include "domdec.h"
 +#include "domdec_network.h"
 +#include "partdec.h"
 +#include "topsort.h"
 +#include "coulomb.h"
 +#include "constr.h"
 +#include "shellfc.h"
 +#include "compute_io.h"
 +#include "mvdata.h"
 +#include "checkpoint.h"
 +#include "mtop_util.h"
 +#include "sighandler.h"
 +#include "txtdump.h"
 +#include "string2.h"
 +#include "pme_loadbal.h"
 +#include "bondf.h"
 +#include "membed.h"
 +#include "types/nlistheuristics.h"
 +#include "types/iteratedconstraints.h"
 +#include "nbnxn_cuda_data_mgmt.h"
 +
 +#ifdef GMX_LIB_MPI
 +#include <mpi.h>
 +#endif
 +#ifdef GMX_THREAD_MPI
 +#include "tmpi.h"
 +#endif
 +
 +#ifdef GMX_FAHCORE
 +#include "corewrap.h"
 +#endif
 +
 +static void reset_all_counters(FILE *fplog,t_commrec *cr,
 +                               gmx_large_int_t step,
 +                               gmx_large_int_t *step_rel,t_inputrec *ir,
 +                               gmx_wallcycle_t wcycle,t_nrnb *nrnb,
 +                               gmx_runtime_t *runtime,
 +                               nbnxn_cuda_ptr_t cu_nbv)
 +{
 +    char sbuf[STEPSTRSIZE];
 +
 +    /* Reset all the counters related to performance over the run */
 +    md_print_warn(cr,fplog,"step %s: resetting all time and cycle counters\n",
 +                  gmx_step_str(step,sbuf));
 +
 +    if (cu_nbv)
 +    {
 +        nbnxn_cuda_reset_timings(cu_nbv);
 +    }
 +
 +    wallcycle_stop(wcycle,ewcRUN);
 +    wallcycle_reset_all(wcycle);
 +    if (DOMAINDECOMP(cr))
 +    {
 +        reset_dd_statistics_counters(cr->dd);
 +    }
 +    init_nrnb(nrnb);
 +    ir->init_step += *step_rel;
 +    ir->nsteps    -= *step_rel;
 +    *step_rel = 0;
 +    wallcycle_start(wcycle,ewcRUN);
 +    runtime_start(runtime);
 +    print_date_and_time(fplog,cr->nodeid,"Restarted time",runtime);
 +}
 +
 +double do_md(FILE *fplog,t_commrec *cr,int nfile,const t_filenm fnm[],
 +             const output_env_t oenv, gmx_bool bVerbose,gmx_bool bCompact,
 +             int nstglobalcomm,
 +             gmx_vsite_t *vsite,gmx_constr_t constr,
 +             int stepout,t_inputrec *ir,
 +             gmx_mtop_t *top_global,
 +             t_fcdata *fcd,
 +             t_state *state_global,
 +             t_mdatoms *mdatoms,
 +             t_nrnb *nrnb,gmx_wallcycle_t wcycle,
 +             gmx_edsam_t ed,t_forcerec *fr,
 +             int repl_ex_nst,int repl_ex_nex,int repl_ex_seed,gmx_membed_t membed,
 +             real cpt_period,real max_hours,
 +             const char *deviceOptions,
 +             unsigned long Flags,
 +             gmx_runtime_t *runtime)
 +{
 +    gmx_mdoutf_t *outf;
 +    gmx_large_int_t step,step_rel;
 +    double     run_time;
 +    double     t,t0,lam0[efptNR];
 +    gmx_bool       bGStatEveryStep,bGStat,bCalcVir,bCalcEner;
 +    gmx_bool       bNS,bNStList,bSimAnn,bStopCM,bRerunMD,bNotLastFrame=FALSE,
 +               bFirstStep,bStateFromCP,bStateFromTPX,bInitStep,bLastStep,
 +               bBornRadii,bStartingFromCpt;
 +    gmx_bool   bDoDHDL=FALSE,bDoFEP=FALSE,bDoExpanded=FALSE;
 +    gmx_bool       do_ene,do_log,do_verbose,bRerunWarnNoV=TRUE,
 +               bForceUpdate=FALSE,bCPT;
 +    int        mdof_flags;
 +    gmx_bool       bMasterState;
 +    int        force_flags,cglo_flags;
 +    tensor     force_vir,shake_vir,total_vir,tmp_vir,pres;
 +    int        i,m;
 +    t_trxstatus *status;
 +    rvec       mu_tot;
 +    t_vcm      *vcm;
 +    t_state    *bufstate=NULL;   
 +    matrix     *scale_tot,pcoupl_mu,M,ebox;
 +    gmx_nlheur_t nlh;
 +    t_trxframe rerun_fr;
 +    gmx_repl_ex_t repl_ex=NULL;
 +    int        nchkpt=1;
 +    gmx_localtop_t *top;      
 +    t_mdebin *mdebin=NULL;
 +    df_history_t df_history;
 +    t_state    *state=NULL;
 +    rvec       *f_global=NULL;
 +    int        n_xtc=-1;
 +    rvec       *x_xtc=NULL;
 +    gmx_enerdata_t *enerd;
 +    rvec       *f=NULL;
 +    gmx_global_stat_t gstat;
 +    gmx_update_t upd=NULL;
 +    t_graph    *graph=NULL;
 +    globsig_t   gs;
 +    gmx_rng_t mcrng=NULL;
 +    gmx_bool        bFFscan;
 +    gmx_groups_t *groups;
 +    gmx_ekindata_t *ekind, *ekind_save;
 +    gmx_shellfc_t shellfc;
 +    int         count,nconverged=0;
 +    real        timestep=0;
 +    double      tcount=0;
 +    gmx_bool        bIonize=FALSE;
 +    gmx_bool        bTCR=FALSE,bConverged=TRUE,bOK,bSumEkinhOld,bExchanged;
 +    gmx_bool        bAppend;
 +    gmx_bool        bResetCountersHalfMaxH=FALSE;
 +    gmx_bool        bVV,bIterations,bFirstIterate,bTemp,bPres,bTrotter;
 +    gmx_bool        bUpdateDoLR;
 +    real        mu_aver=0,dvdl;
 +    int         a0,a1,gnx=0,ii;
 +    atom_id     *grpindex=NULL;
 +    char        *grpname;
 +    t_coupl_rec *tcr=NULL;
 +    rvec        *xcopy=NULL,*vcopy=NULL,*cbuf=NULL;
 +    matrix      boxcopy={{0}},lastbox;
 +      tensor      tmpvir;
 +      real        fom,oldfom,veta_save,pcurr,scalevir,tracevir;
 +      real        vetanew = 0;
 +    int         lamnew=0;
 +    /* for FEP */
 +    int         fep_state=0;
 +    int         nstfep;
 +    real        rate;
 +    double      cycles;
 +      real        saved_conserved_quantity = 0;
 +    real        last_ekin = 0;
 +      int         iter_i;
 +      t_extmass   MassQ;
 +    int         **trotter_seq; 
 +    char        sbuf[STEPSTRSIZE],sbuf2[STEPSTRSIZE];
 +    int         handled_stop_condition=gmx_stop_cond_none; /* compare to get_stop_condition*/
 +    gmx_iterate_t iterate;
 +    gmx_large_int_t multisim_nsteps=-1; /* number of steps to do  before first multisim 
 +                                          simulation stops. If equal to zero, don't
 +                                          communicate any more between multisims.*/
 +    /* PME load balancing data for GPU kernels */
 +    pme_load_balancing_t pme_loadbal=NULL;
 +    double          cycles_pmes;
 +    gmx_bool        bPMETuneTry=FALSE,bPMETuneRunning=FALSE;
 +
 +#ifdef GMX_FAHCORE
 +    /* Temporary addition for FAHCORE checkpointing */
 +    int chkpt_ret;
 +#endif
 +    
 +    /* Check for special mdrun options */
 +    bRerunMD = (Flags & MD_RERUN);
 +    bIonize  = (Flags & MD_IONIZE);
 +    bFFscan  = (Flags & MD_FFSCAN);
 +    bAppend  = (Flags & MD_APPENDFILES);
 +    if (Flags & MD_RESETCOUNTERSHALFWAY)
 +    {
 +        if (ir->nsteps > 0)
 +        {
 +            /* Signal to reset the counters half the simulation steps. */
 +            wcycle_set_reset_counters(wcycle,ir->nsteps/2);
 +        }
 +        /* Signal to reset the counters halfway the simulation time. */
 +        bResetCountersHalfMaxH = (max_hours > 0);
 +    }
 +
 +    /* md-vv uses averaged full step velocities for T-control 
 +       md-vv-avek uses averaged half step velocities for T-control (but full step ekin for P control)
 +       md uses averaged half step kinetic energies to determine temperature unless defined otherwise by GMX_EKIN_AVE_VEL; */
 +    bVV = EI_VV(ir->eI);
 +    if (bVV) /* to store the initial velocities while computing virial */
 +    {
 +        snew(cbuf,top_global->natoms);
 +    }
 +    /* all the iteratative cases - only if there are constraints */ 
 +    bIterations = ((IR_NPH_TROTTER(ir) || IR_NPT_TROTTER(ir)) && (constr) && (!bRerunMD));
 +    bTrotter = (bVV && (IR_NPT_TROTTER(ir) || IR_NPH_TROTTER(ir) || IR_NVT_TROTTER(ir)));
 +    
 +    if (bRerunMD)
 +    {
 +        /* Since we don't know if the frames read are related in any way,
 +         * rebuild the neighborlist at every step.
 +         */
 +        ir->nstlist       = 1;
 +        ir->nstcalcenergy = 1;
 +        nstglobalcomm     = 1;
 +    }
 +
 +    check_ir_old_tpx_versions(cr,fplog,ir,top_global);
 +
 +    nstglobalcomm = check_nstglobalcomm(fplog,cr,nstglobalcomm,ir);
 +    bGStatEveryStep = (nstglobalcomm == 1);
 +
 +    if (!bGStatEveryStep && ir->nstlist == -1 && fplog != NULL)
 +    {
 +        fprintf(fplog,
 +                "To reduce the energy communication with nstlist = -1\n"
 +                "the neighbor list validity should not be checked at every step,\n"
 +                "this means that exact integration is not guaranteed.\n"
 +                "The neighbor list validity is checked after:\n"
 +                "  <n.list life time> - 2*std.dev.(n.list life time)  steps.\n"
 +                "In most cases this will result in exact integration.\n"
 +                "This reduces the energy communication by a factor of 2 to 3.\n"
 +                "If you want less energy communication, set nstlist > 3.\n\n");
 +    }
 +
 +    if (bRerunMD || bFFscan)
 +    {
 +        ir->nstxtcout = 0;
 +    }
 +    groups = &top_global->groups;
 +
 +    /* Initial values */
 +    init_md(fplog,cr,ir,oenv,&t,&t0,state_global->lambda,
 +            &(state_global->fep_state),lam0,
 +            nrnb,top_global,&upd,
 +            nfile,fnm,&outf,&mdebin,
 +            force_vir,shake_vir,mu_tot,&bSimAnn,&vcm,state_global,Flags);
 +
 +    clear_mat(total_vir);
 +    clear_mat(pres);
 +    /* Energy terms and groups */
 +    snew(enerd,1);
 +    init_enerdata(top_global->groups.grps[egcENER].nr,ir->fepvals->n_lambda,
 +                  enerd);
 +    if (DOMAINDECOMP(cr))
 +    {
 +        f = NULL;
 +    }
 +    else
 +    {
 +        snew(f,top_global->natoms);
 +    }
 +
 +    /* lambda Monte carlo random number generator  */
 +    if (ir->bExpanded)
 +    {
 +        mcrng = gmx_rng_init(ir->expandedvals->lmc_seed);
 +    }
 +    /* copy the state into df_history */
 +    copy_df_history(&df_history,&state_global->dfhist);
 +
 +    /* Kinetic energy data */
 +    snew(ekind,1);
 +    init_ekindata(fplog,top_global,&(ir->opts),ekind);
 +    /* needed for iteration of constraints */
 +    snew(ekind_save,1);
 +    init_ekindata(fplog,top_global,&(ir->opts),ekind_save);
 +    /* Copy the cos acceleration to the groups struct */    
 +    ekind->cosacc.cos_accel = ir->cos_accel;
 +
 +    gstat = global_stat_init(ir);
 +    debug_gmx();
 +
 +    /* Check for polarizable models and flexible constraints */
 +    shellfc = init_shell_flexcon(fplog,
 +                                 top_global,n_flexible_constraints(constr),
 +                                 (ir->bContinuation || 
 +                                  (DOMAINDECOMP(cr) && !MASTER(cr))) ?
 +                                 NULL : state_global->x);
 +
 +    if (DEFORM(*ir))
 +    {
 +#ifdef GMX_THREAD_MPI
 +        tMPI_Thread_mutex_lock(&deform_init_box_mutex);
 +#endif
 +        set_deform_reference_box(upd,
 +                                 deform_init_init_step_tpx,
 +                                 deform_init_box_tpx);
 +#ifdef GMX_THREAD_MPI
 +        tMPI_Thread_mutex_unlock(&deform_init_box_mutex);
 +#endif
 +    }
 +
 +    {
 +        double io = compute_io(ir,top_global->natoms,groups,mdebin->ebin->nener,1);
 +        if ((io > 2000) && MASTER(cr))
 +            fprintf(stderr,
 +                    "\nWARNING: This run will generate roughly %.0f Mb of data\n\n",
 +                    io);
 +    }
 +
 +    if (DOMAINDECOMP(cr)) {
 +        top = dd_init_local_top(top_global);
 +
 +        snew(state,1);
 +        dd_init_local_state(cr->dd,state_global,state);
 +
 +        if (DDMASTER(cr->dd) && ir->nstfout) {
 +            snew(f_global,state_global->natoms);
 +        }
 +    } else {
 +        if (PAR(cr)) {
 +            /* Initialize the particle decomposition and split the topology */
 +            top = split_system(fplog,top_global,ir,cr);
 +
 +            pd_cg_range(cr,&fr->cg0,&fr->hcg);
 +            pd_at_range(cr,&a0,&a1);
 +        } else {
 +            top = gmx_mtop_generate_local_top(top_global,ir);
 +
 +            a0 = 0;
 +            a1 = top_global->natoms;
 +        }
 +
 +        forcerec_set_excl_load(fr,top,cr);
 +
 +        state = partdec_init_local_state(cr,state_global);
 +        f_global = f;
 +
 +        atoms2md(top_global,ir,0,NULL,a0,a1-a0,mdatoms);
 +
 +        if (vsite) {
 +            set_vsite_top(vsite,top,mdatoms,cr);
 +        }
 +
 +        if (ir->ePBC != epbcNONE && !fr->bMolPBC) {
 +            graph = mk_graph(fplog,&(top->idef),0,top_global->natoms,FALSE,FALSE);
 +        }
 +
 +        if (shellfc) {
 +            make_local_shells(cr,mdatoms,shellfc);
 +        }
 +
 +        init_bonded_thread_force_reduction(fr,&top->idef);
 +
 +        if (ir->pull && PAR(cr)) {
 +            dd_make_local_pull_groups(NULL,ir->pull,mdatoms);
 +        }
 +    }
 +
 +    if (DOMAINDECOMP(cr))
 +    {
 +        /* Distribute the charge groups over the nodes from the master node */
 +        dd_partition_system(fplog,ir->init_step,cr,TRUE,1,
 +                            state_global,top_global,ir,
 +                            state,&f,mdatoms,top,fr,
 +                            vsite,shellfc,constr,
 +                            nrnb,wcycle,FALSE);
 +
 +    }
 +
 +    update_mdatoms(mdatoms,state->lambda[efptMASS]);
 +
 +    if (opt2bSet("-cpi",nfile,fnm))
 +    {
 +        bStateFromCP = gmx_fexist_master(opt2fn_master("-cpi",nfile,fnm,cr),cr);
 +    }
 +    else
 +    {
 +        bStateFromCP = FALSE;
 +    }
 +
 +    if (MASTER(cr))
 +    {
 +        if (bStateFromCP)
 +        {
 +            /* Update mdebin with energy history if appending to output files */
 +            if ( Flags & MD_APPENDFILES )
 +            {
 +                restore_energyhistory_from_state(mdebin,&state_global->enerhist);
 +            }
 +            else
 +            {
 +                /* We might have read an energy history from checkpoint,
 +                 * free the allocated memory and reset the counts.
 +                 */
 +                done_energyhistory(&state_global->enerhist);
 +                init_energyhistory(&state_global->enerhist);
 +            }
 +        }
 +        /* Set the initial energy history in state by updating once */
 +        update_energyhistory(&state_global->enerhist,mdebin);
 +    } 
 +
 +    if ((state->flags & (1<<estLD_RNG)) && (Flags & MD_READ_RNG)) 
 +    {
 +        /* Set the random state if we read a checkpoint file */
 +        set_stochd_state(upd,state);
 +    }
 +
 +    if (state->flags & (1<<estMC_RNG))
 +    {
 +        set_mc_state(mcrng,state);
 +    }
 +
 +    /* Initialize constraints */
 +    if (constr) {
 +        if (!DOMAINDECOMP(cr))
 +            set_constraints(constr,top,ir,mdatoms,cr);
 +    }
 +
 +    /* Check whether we have to GCT stuff */
 +    bTCR = ftp2bSet(efGCT,nfile,fnm);
 +    if (bTCR) {
 +        if (MASTER(cr)) {
 +            fprintf(stderr,"Will do General Coupling Theory!\n");
 +        }
 +        gnx = top_global->mols.nr;
 +        snew(grpindex,gnx);
 +        for(i=0; (i<gnx); i++) {
 +            grpindex[i] = i;
 +        }
 +    }
 +
 +    if (repl_ex_nst > 0)
 +    {
 +        /* We need to be sure replica exchange can only occur
 +         * when the energies are current */
 +        check_nst_param(fplog,cr,"nstcalcenergy",ir->nstcalcenergy,
 +                        "repl_ex_nst",&repl_ex_nst);
 +        /* This check needs to happen before inter-simulation
 +         * signals are initialized, too */
 +    }
 +    if (repl_ex_nst > 0 && MASTER(cr))
 +    {
 +        repl_ex = init_replica_exchange(fplog,cr->ms,state_global,ir,
 +                                        repl_ex_nst,repl_ex_nex,repl_ex_seed);
 +    }
 +
 +    /* PME tuning is only supported with GPUs or PME nodes and not with rerun */
 +    if ((Flags & MD_TUNEPME) &&
 +        EEL_PME(fr->eeltype) &&
 +        ( (fr->cutoff_scheme == ecutsVERLET && fr->nbv->bUseGPU) || !(cr->duty & DUTY_PME)) &&
 +        !bRerunMD)
 +    {
 +        pme_loadbal_init(&pme_loadbal,ir,state->box,fr->ic,fr->pmedata);
 +        cycles_pmes = 0;
 +        if (cr->duty & DUTY_PME)
 +        {
 +            /* Start tuning right away, as we can't measure the load */
 +            bPMETuneRunning = TRUE;
 +        }
 +        else
 +        {
 +            /* Separate PME nodes, we can measure the PP/PME load balance */
 +            bPMETuneTry = TRUE;
 +        }
 +    }
 +
 +    if (!ir->bContinuation && !bRerunMD)
 +    {
 +        if (mdatoms->cFREEZE && (state->flags & (1<<estV)))
 +        {
 +            /* Set the velocities of frozen particles to zero */
 +            for(i=mdatoms->start; i<mdatoms->start+mdatoms->homenr; i++)
 +            {
 +                for(m=0; m<DIM; m++)
 +                {
 +                    if (ir->opts.nFreeze[mdatoms->cFREEZE[i]][m])
 +                    {
 +                        state->v[i][m] = 0;
 +                    }
 +                }
 +            }
 +        }
 +
 +        if (constr)
 +        {
 +            /* Constrain the initial coordinates and velocities */
 +            do_constrain_first(fplog,constr,ir,mdatoms,state,f,
 +                               graph,cr,nrnb,fr,top,shake_vir);
 +        }
 +        if (vsite)
 +        {
 +            /* Construct the virtual sites for the initial configuration */
 +            construct_vsites(fplog,vsite,state->x,nrnb,ir->delta_t,NULL,
 +                             top->idef.iparams,top->idef.il,
 +                             fr->ePBC,fr->bMolPBC,graph,cr,state->box);
 +        }
 +    }
 +
 +    debug_gmx();
 +  
 +    /* set free energy calculation frequency as the minimum of nstdhdl, nstexpanded, and nstrepl_ex_nst*/
 +    nstfep = ir->fepvals->nstdhdl;
 +    if (ir->bExpanded && (nstfep > ir->expandedvals->nstexpanded))
 +    {
 +        nstfep = ir->expandedvals->nstexpanded;
 +    }
-                                 | (bStopCM ? CGLO_STOPCM : 0)
++    if (repl_ex_nst > 0 && nstfep > repl_ex_nst)
 +    {
 +        nstfep = repl_ex_nst;
 +    }
 +
 +    /* I'm assuming we need global communication the first time! MRS */
 +    cglo_flags = (CGLO_TEMPERATURE | CGLO_GSTAT
 +                  | ((ir->comm_mode != ecmNO) ? CGLO_STOPCM:0)
 +                  | (bVV ? CGLO_PRESSURE:0)
 +                  | (bVV ? CGLO_CONSTRAINT:0)
 +                  | (bRerunMD ? CGLO_RERUNMD:0)
 +                  | ((Flags & MD_READ_EKIN) ? CGLO_READEKIN:0));
 +    
 +    bSumEkinhOld = FALSE;
 +    compute_globals(fplog,gstat,cr,ir,fr,ekind,state,state_global,mdatoms,nrnb,vcm,
 +                    NULL,enerd,force_vir,shake_vir,total_vir,pres,mu_tot,
 +                    constr,NULL,FALSE,state->box,
 +                    top_global,&pcurr,top_global->natoms,&bSumEkinhOld,cglo_flags);
 +    if (ir->eI == eiVVAK) {
 +        /* a second call to get the half step temperature initialized as well */ 
 +        /* we do the same call as above, but turn the pressure off -- internally to 
 +           compute_globals, this is recognized as a velocity verlet half-step 
 +           kinetic energy calculation.  This minimized excess variables, but 
 +           perhaps loses some logic?*/
 +        
 +        compute_globals(fplog,gstat,cr,ir,fr,ekind,state,state_global,mdatoms,nrnb,vcm,
 +                        NULL,enerd,force_vir,shake_vir,total_vir,pres,mu_tot,
 +                        constr,NULL,FALSE,state->box,
 +                        top_global,&pcurr,top_global->natoms,&bSumEkinhOld,
 +                        cglo_flags &~ (CGLO_STOPCM | CGLO_PRESSURE));
 +    }
 +    
 +    /* Calculate the initial half step temperature, and save the ekinh_old */
 +    if (!(Flags & MD_STARTFROMCPT)) 
 +    {
 +        for(i=0; (i<ir->opts.ngtc); i++) 
 +        {
 +            copy_mat(ekind->tcstat[i].ekinh,ekind->tcstat[i].ekinh_old);
 +        } 
 +    }
 +    if (ir->eI != eiVV) 
 +    {
 +        enerd->term[F_TEMP] *= 2; /* result of averages being done over previous and current step,
 +                                     and there is no previous step */
 +    }
 +    
 +    /* if using an iterative algorithm, we need to create a working directory for the state. */
 +    if (bIterations) 
 +    {
 +            bufstate = init_bufstate(state);
 +    }
 +    if (bFFscan) 
 +    {
 +        snew(xcopy,state->natoms);
 +        snew(vcopy,state->natoms);
 +        copy_rvecn(state->x,xcopy,0,state->natoms);
 +        copy_rvecn(state->v,vcopy,0,state->natoms);
 +        copy_mat(state->box,boxcopy);
 +    } 
 +    
 +    /* need to make an initiation call to get the Trotter variables set, as well as other constants for non-trotter
 +       temperature control */
 +    trotter_seq = init_npt_vars(ir,state,&MassQ,bTrotter);
 +    
 +    if (MASTER(cr))
 +    {
 +        if (constr && !ir->bContinuation && ir->eConstrAlg == econtLINCS)
 +        {
 +            fprintf(fplog,
 +                    "RMS relative constraint deviation after constraining: %.2e\n",
 +                    constr_rmsd(constr,FALSE));
 +        }
 +        if (EI_STATE_VELOCITY(ir->eI))
 +        {
 +            fprintf(fplog,"Initial temperature: %g K\n",enerd->term[F_TEMP]);
 +        }
 +        if (bRerunMD)
 +        {
 +            fprintf(stderr,"starting md rerun '%s', reading coordinates from"
 +                    " input trajectory '%s'\n\n",
 +                    *(top_global->name),opt2fn("-rerun",nfile,fnm));
 +            if (bVerbose)
 +            {
 +                fprintf(stderr,"Calculated time to finish depends on nsteps from "
 +                        "run input file,\nwhich may not correspond to the time "
 +                        "needed to process input trajectory.\n\n");
 +            }
 +        }
 +        else
 +        {
 +            char tbuf[20];
 +            fprintf(stderr,"starting mdrun '%s'\n",
 +                    *(top_global->name));
 +            if (ir->nsteps >= 0)
 +            {
 +                sprintf(tbuf,"%8.1f",(ir->init_step+ir->nsteps)*ir->delta_t);
 +            }
 +            else
 +            {
 +                sprintf(tbuf,"%s","infinite");
 +            }
 +            if (ir->init_step > 0)
 +            {
 +                fprintf(stderr,"%s steps, %s ps (continuing from step %s, %8.1f ps).\n",
 +                        gmx_step_str(ir->init_step+ir->nsteps,sbuf),tbuf,
 +                        gmx_step_str(ir->init_step,sbuf2),
 +                        ir->init_step*ir->delta_t);
 +            }
 +            else
 +            {
 +                fprintf(stderr,"%s steps, %s ps.\n",
 +                        gmx_step_str(ir->nsteps,sbuf),tbuf);
 +            }
 +        }
 +        fprintf(fplog,"\n");
 +    }
 +
 +    /* Set and write start time */
 +    runtime_start(runtime);
 +    print_date_and_time(fplog,cr->nodeid,"Started mdrun",runtime);
 +    wallcycle_start(wcycle,ewcRUN);
 +    if (fplog)
 +    {
 +        fprintf(fplog,"\n");
 +    }
 +
 +    /* safest point to do file checkpointing is here.  More general point would be immediately before integrator call */
 +#ifdef GMX_FAHCORE
 +    chkpt_ret=fcCheckPointParallel( cr->nodeid,
 +                                    NULL,0);
 +    if ( chkpt_ret == 0 ) 
 +        gmx_fatal( 3,__FILE__,__LINE__, "Checkpoint error on step %d\n", 0 );
 +#endif
 +
 +    debug_gmx();
 +    /***********************************************************
 +     *
 +     *             Loop over MD steps 
 +     *
 +     ************************************************************/
 +
 +    /* if rerunMD then read coordinates and velocities from input trajectory */
 +    if (bRerunMD)
 +    {
 +        if (getenv("GMX_FORCE_UPDATE"))
 +        {
 +            bForceUpdate = TRUE;
 +        }
 +
 +        rerun_fr.natoms = 0;
 +        if (MASTER(cr))
 +        {
 +            bNotLastFrame = read_first_frame(oenv,&status,
 +                                             opt2fn("-rerun",nfile,fnm),
 +                                             &rerun_fr,TRX_NEED_X | TRX_READ_V);
 +            if (rerun_fr.natoms != top_global->natoms)
 +            {
 +                gmx_fatal(FARGS,
 +                          "Number of atoms in trajectory (%d) does not match the "
 +                          "run input file (%d)\n",
 +                          rerun_fr.natoms,top_global->natoms);
 +            }
 +            if (ir->ePBC != epbcNONE)
 +            {
 +                if (!rerun_fr.bBox)
 +                {
 +                    gmx_fatal(FARGS,"Rerun trajectory frame step %d time %f does not contain a box, while pbc is used",rerun_fr.step,rerun_fr.time);
 +                }
 +                if (max_cutoff2(ir->ePBC,rerun_fr.box) < sqr(fr->rlistlong))
 +                {
 +                    gmx_fatal(FARGS,"Rerun trajectory frame step %d time %f has too small box dimensions",rerun_fr.step,rerun_fr.time);
 +                }
 +            }
 +        }
 +
 +        if (PAR(cr))
 +        {
 +            rerun_parallel_comm(cr,&rerun_fr,&bNotLastFrame);
 +        }
 +
 +        if (ir->ePBC != epbcNONE)
 +        {
 +            /* Set the shift vectors.
 +             * Necessary here when have a static box different from the tpr box.
 +             */
 +            calc_shifts(rerun_fr.box,fr->shift_vec);
 +        }
 +    }
 +
 +    /* loop over MD steps or if rerunMD to end of input trajectory */
 +    bFirstStep = TRUE;
 +    /* Skip the first Nose-Hoover integration when we get the state from tpx */
 +    bStateFromTPX = !bStateFromCP;
 +    bInitStep = bFirstStep && (bStateFromTPX || bVV);
 +    bStartingFromCpt = (Flags & MD_STARTFROMCPT) && bInitStep;
 +    bLastStep    = FALSE;
 +    bSumEkinhOld = FALSE;
 +    bExchanged   = FALSE;
 +
 +    init_global_signals(&gs,cr,ir,repl_ex_nst);
 +
 +    step = ir->init_step;
 +    step_rel = 0;
 +
 +    if (ir->nstlist == -1)
 +    {
 +        init_nlistheuristics(&nlh,bGStatEveryStep,step);
 +    }
 +
 +    if (MULTISIM(cr) && (repl_ex_nst <=0 ))
 +    {
 +        /* check how many steps are left in other sims */
 +        multisim_nsteps=get_multisim_nsteps(cr, ir->nsteps);
 +    }
 +
 +
 +    /* and stop now if we should */
 +    bLastStep = (bRerunMD || (ir->nsteps >= 0 && step_rel > ir->nsteps) ||
 +                 ((multisim_nsteps >= 0) && (step_rel >= multisim_nsteps )));
 +    while (!bLastStep || (bRerunMD && bNotLastFrame)) {
 +
 +        wallcycle_start(wcycle,ewcSTEP);
 +
 +        if (bRerunMD) {
 +            if (rerun_fr.bStep) {
 +                step = rerun_fr.step;
 +                step_rel = step - ir->init_step;
 +            }
 +            if (rerun_fr.bTime) {
 +                t = rerun_fr.time;
 +            }
 +            else
 +            {
 +                t = step;
 +            }
 +        } 
 +        else 
 +        {
 +            bLastStep = (step_rel == ir->nsteps);
 +            t = t0 + step*ir->delta_t;
 +        }
 +
 +        if (ir->efep != efepNO || ir->bSimTemp)
 +            {
 +            /* find and set the current lambdas.  If rerunning, we either read in a state, or a lambda value,
 +               requiring different logic. */
 +            
 +            set_current_lambdas(step,ir->fepvals,bRerunMD,&rerun_fr,state_global,state,lam0);
 +            bDoDHDL = do_per_step(step,ir->fepvals->nstdhdl);
 +            bDoFEP  = (do_per_step(step,nstfep) && (ir->efep != efepNO));
 +            bDoExpanded  = (do_per_step(step,ir->expandedvals->nstexpanded) && (ir->bExpanded) && (step > 0));
 +        }
 +
 +        if (bSimAnn) 
 +        {
 +            update_annealing_target_temp(&(ir->opts),t);
 +        }
 +
 +        if (bRerunMD)
 +        {
 +            if (!(DOMAINDECOMP(cr) && !MASTER(cr)))
 +            {
 +                for(i=0; i<state_global->natoms; i++)
 +                {
 +                    copy_rvec(rerun_fr.x[i],state_global->x[i]);
 +                }
 +                if (rerun_fr.bV)
 +                {
 +                    for(i=0; i<state_global->natoms; i++)
 +                    {
 +                        copy_rvec(rerun_fr.v[i],state_global->v[i]);
 +                    }
 +                }
 +                else
 +                {
 +                    for(i=0; i<state_global->natoms; i++)
 +                    {
 +                        clear_rvec(state_global->v[i]);
 +                    }
 +                    if (bRerunWarnNoV)
 +                    {
 +                        fprintf(stderr,"\nWARNING: Some frames do not contain velocities.\n"
 +                                "         Ekin, temperature and pressure are incorrect,\n"
 +                                "         the virial will be incorrect when constraints are present.\n"
 +                                "\n");
 +                        bRerunWarnNoV = FALSE;
 +                    }
 +                }
 +            }
 +            copy_mat(rerun_fr.box,state_global->box);
 +            copy_mat(state_global->box,state->box);
 +
 +            if (vsite && (Flags & MD_RERUN_VSITE))
 +            {
 +                if (DOMAINDECOMP(cr))
 +                {
 +                    gmx_fatal(FARGS,"Vsite recalculation with -rerun is not implemented for domain decomposition, use particle decomposition");
 +                }
 +                if (graph)
 +                {
 +                    /* Following is necessary because the graph may get out of sync
 +                     * with the coordinates if we only have every N'th coordinate set
 +                     */
 +                    mk_mshift(fplog,graph,fr->ePBC,state->box,state->x);
 +                    shift_self(graph,state->box,state->x);
 +                }
 +                construct_vsites(fplog,vsite,state->x,nrnb,ir->delta_t,state->v,
 +                                 top->idef.iparams,top->idef.il,
 +                                 fr->ePBC,fr->bMolPBC,graph,cr,state->box);
 +                if (graph)
 +                {
 +                    unshift_self(graph,state->box,state->x);
 +                }
 +            }
 +        }
 +
 +        /* Stop Center of Mass motion */
 +        bStopCM = (ir->comm_mode != ecmNO && do_per_step(step,ir->nstcomm));
 +
 +        /* Copy back starting coordinates in case we're doing a forcefield scan */
 +        if (bFFscan)
 +        {
 +            for(ii=0; (ii<state->natoms); ii++)
 +            {
 +                copy_rvec(xcopy[ii],state->x[ii]);
 +                copy_rvec(vcopy[ii],state->v[ii]);
 +            }
 +            copy_mat(boxcopy,state->box);
 +        }
 +
 +        if (bRerunMD)
 +        {
 +            /* for rerun MD always do Neighbour Searching */
 +            bNS = (bFirstStep || ir->nstlist != 0);
 +            bNStList = bNS;
 +        }
 +        else
 +        {
 +            /* Determine whether or not to do Neighbour Searching and LR */
 +            bNStList = (ir->nstlist > 0  && step % ir->nstlist == 0);
 +            
 +            bNS = (bFirstStep || bExchanged || bNStList || bDoFEP ||
 +                   (ir->nstlist == -1 && nlh.nabnsb > 0));
 +
 +            if (bNS && ir->nstlist == -1)
 +            {
 +                set_nlistheuristics(&nlh,bFirstStep || bExchanged || bDoFEP, step);
 +            }
 +        } 
 +
 +        /* check whether we should stop because another simulation has 
 +           stopped. */
 +        if (MULTISIM(cr))
 +        {
 +            if ( (multisim_nsteps >= 0) &&  (step_rel >= multisim_nsteps)  &&  
 +                 (multisim_nsteps != ir->nsteps) )  
 +            {
 +                if (bNS)
 +                {
 +                    if (MASTER(cr))
 +                    {
 +                        fprintf(stderr, 
 +                                "Stopping simulation %d because another one has finished\n",
 +                                cr->ms->sim);
 +                    }
 +                    bLastStep=TRUE;
 +                    gs.sig[eglsCHKPT] = 1;
 +                }
 +            }
 +        }
 +
 +        /* < 0 means stop at next step, > 0 means stop at next NS step */
 +        if ( (gs.set[eglsSTOPCOND] < 0 ) ||
 +             ( (gs.set[eglsSTOPCOND] > 0 ) && ( bNS || ir->nstlist==0)) )
 +        {
 +            bLastStep = TRUE;
 +        }
 +
 +        /* Determine whether or not to update the Born radii if doing GB */
 +        bBornRadii=bFirstStep;
 +        if (ir->implicit_solvent && (step % ir->nstgbradii==0))
 +        {
 +            bBornRadii=TRUE;
 +        }
 +        
 +        do_log = do_per_step(step,ir->nstlog) || bFirstStep || bLastStep;
 +        do_verbose = bVerbose &&
 +                  (step % stepout == 0 || bFirstStep || bLastStep);
 +
 +        if (bNS && !(bFirstStep && ir->bContinuation && !bRerunMD))
 +        {
 +            if (bRerunMD)
 +            {
 +                bMasterState = TRUE;
 +            }
 +            else
 +            {
 +                bMasterState = FALSE;
 +                /* Correct the new box if it is too skewed */
 +                if (DYNAMIC_BOX(*ir))
 +                {
 +                    if (correct_box(fplog,step,state->box,graph))
 +                    {
 +                        bMasterState = TRUE;
 +                    }
 +                }
 +                if (DOMAINDECOMP(cr) && bMasterState)
 +                {
 +                    dd_collect_state(cr->dd,state,state_global);
 +                }
 +            }
 +
 +            if (DOMAINDECOMP(cr))
 +            {
 +                /* Repartition the domain decomposition */
 +                wallcycle_start(wcycle,ewcDOMDEC);
 +                dd_partition_system(fplog,step,cr,
 +                                    bMasterState,nstglobalcomm,
 +                                    state_global,top_global,ir,
 +                                    state,&f,mdatoms,top,fr,
 +                                    vsite,shellfc,constr,
 +                                    nrnb,wcycle,
 +                                    do_verbose && !bPMETuneRunning);
 +                wallcycle_stop(wcycle,ewcDOMDEC);
 +                /* If using an iterative integrator, reallocate space to match the decomposition */
 +            }
 +        }
 +
 +        if (MASTER(cr) && do_log && !bFFscan)
 +        {
 +            print_ebin_header(fplog,step,t,state->lambda[efptFEP]); /* can we improve the information printed here? */
 +        }
 +
 +        if (ir->efep != efepNO)
 +        {
 +            update_mdatoms(mdatoms,state->lambda[efptMASS]);
 +        }
 +
 +        if ((bRerunMD && rerun_fr.bV) || bExchanged)
 +        {
 +            
 +            /* We need the kinetic energy at minus the half step for determining
 +             * the full step kinetic energy and possibly for T-coupling.*/
 +            /* This may not be quite working correctly yet . . . . */
 +            compute_globals(fplog,gstat,cr,ir,fr,ekind,state,state_global,mdatoms,nrnb,vcm,
 +                            wcycle,enerd,NULL,NULL,NULL,NULL,mu_tot,
 +                            constr,NULL,FALSE,state->box,
 +                            top_global,&pcurr,top_global->natoms,&bSumEkinhOld,
 +                            CGLO_RERUNMD | CGLO_GSTAT | CGLO_TEMPERATURE);
 +        }
 +        clear_mat(force_vir);
 +        
 +        /* Ionize the atoms if necessary */
 +        if (bIonize)
 +        {
 +            ionize(fplog,oenv,mdatoms,top_global,t,ir,state->x,state->v,
 +                   mdatoms->start,mdatoms->start+mdatoms->homenr,state->box,cr);
 +        }
 +        
 +        /* Update force field in ffscan program */
 +        if (bFFscan)
 +        {
 +            if (update_forcefield(fplog,
 +                                  nfile,fnm,fr,
 +                                  mdatoms->nr,state->x,state->box))
 +            {
 +                gmx_finalize_par();
 +
 +                exit(0);
 +            }
 +        }
 +
 +        /* We write a checkpoint at this MD step when:
 +         * either at an NS step when we signalled through gs,
 +         * or at the last step (but not when we do not want confout),
 +         * but never at the first step or with rerun.
 +         */
 +        bCPT = (((gs.set[eglsCHKPT] && (bNS || ir->nstlist == 0)) ||
 +                 (bLastStep && (Flags & MD_CONFOUT))) &&
 +                step > ir->init_step && !bRerunMD);
 +        if (bCPT)
 +        {
 +            gs.set[eglsCHKPT] = 0;
 +        }
 +
 +        /* Determine the energy and pressure:
 +         * at nstcalcenergy steps and at energy output steps (set below).
 +         */
 +        if (EI_VV(ir->eI) && (!bInitStep))
 +        {
 +            /* for vv, the first half actually corresponds to the last step */
 +            bCalcEner = do_per_step(step-1,ir->nstcalcenergy);
 +        }
 +        else
 +        {
 +            bCalcEner = do_per_step(step,ir->nstcalcenergy);
 +        }
 +        bCalcVir = bCalcEner ||
 +            (ir->epc != epcNO && do_per_step(step,ir->nstpcouple));
 +
 +        /* Do we need global communication ? */
 +        bGStat = (bCalcVir || bCalcEner || bStopCM ||
 +                  do_per_step(step,nstglobalcomm) ||
 +                  (ir->nstlist == -1 && !bRerunMD && step >= nlh.step_nscheck));
 +
 +        do_ene = (do_per_step(step,ir->nstenergy) || bLastStep);
 +
 +        if (do_ene || do_log)
 +        {
 +            bCalcVir  = TRUE;
 +            bCalcEner = TRUE;
 +            bGStat    = TRUE;
 +        }
 +        
 +        /* these CGLO_ options remain the same throughout the iteration */
 +        cglo_flags = ((bRerunMD ? CGLO_RERUNMD : 0) |
 +                      (bGStat ? CGLO_GSTAT : 0)
 +            );
 +        
 +        force_flags = (GMX_FORCE_STATECHANGED |
 +                       ((DYNAMIC_BOX(*ir) || bRerunMD) ? GMX_FORCE_DYNAMICBOX : 0) |
 +                       GMX_FORCE_ALLFORCES |
 +                       GMX_FORCE_SEPLRF |
 +                       (bCalcVir ? GMX_FORCE_VIRIAL : 0) |
 +                       (bCalcEner ? GMX_FORCE_ENERGY : 0) |
 +                       (bDoFEP ? GMX_FORCE_DHDL : 0)
 +            );
 +
 +        if(fr->bTwinRange)
 +        {
 +            if(do_per_step(step,ir->nstcalclr))
 +            {
 +                force_flags |= GMX_FORCE_DO_LR;
 +            }
 +        }
 +        
 +        if (shellfc)
 +        {
 +            /* Now is the time to relax the shells */
 +            count=relax_shell_flexcon(fplog,cr,bVerbose,bFFscan ? step+1 : step,
 +                                      ir,bNS,force_flags,
 +                                      bStopCM,top,top_global,
 +                                      constr,enerd,fcd,
 +                                      state,f,force_vir,mdatoms,
 +                                      nrnb,wcycle,graph,groups,
 +                                      shellfc,fr,bBornRadii,t,mu_tot,
 +                                      state->natoms,&bConverged,vsite,
 +                                      outf->fp_field);
 +            tcount+=count;
 +
 +            if (bConverged)
 +            {
 +                nconverged++;
 +            }
 +        }
 +        else
 +        {
 +            /* The coordinates (x) are shifted (to get whole molecules)
 +             * in do_force.
 +             * This is parallellized as well, and does communication too. 
 +             * Check comments in sim_util.c
 +             */
 +             do_force(fplog,cr,ir,step,nrnb,wcycle,top,top_global,groups,
 +                     state->box,state->x,&state->hist,
 +                     f,force_vir,mdatoms,enerd,fcd,
 +                     state->lambda,graph,
 +                     fr,vsite,mu_tot,t,outf->fp_field,ed,bBornRadii,
 +                     (bNS ? GMX_FORCE_NS : 0) | force_flags);
 +        }
 +        
 +        if (bTCR)
 +        {
 +            mu_aver = calc_mu_aver(cr,state->x,mdatoms->chargeA,
 +                                   mu_tot,&top_global->mols,mdatoms,gnx,grpindex);
 +        }
 +        
 +        if (bTCR && bFirstStep)
 +        {
 +            tcr=init_coupling(fplog,nfile,fnm,cr,fr,mdatoms,&(top->idef));
 +            fprintf(fplog,"Done init_coupling\n"); 
 +            fflush(fplog);
 +        }
 +        
 +        if (bVV && !bStartingFromCpt && !bRerunMD)
 +        /*  ############### START FIRST UPDATE HALF-STEP FOR VV METHODS############### */
 +        {
 +            if (ir->eI==eiVV && bInitStep) 
 +            {
 +                /* if using velocity verlet with full time step Ekin,
 +                 * take the first half step only to compute the 
 +                 * virial for the first step. From there,
 +                 * revert back to the initial coordinates
 +                 * so that the input is actually the initial step.
 +                 */
 +                copy_rvecn(state->v,cbuf,0,state->natoms); /* should make this better for parallelizing? */
 +            } else {
 +                /* this is for NHC in the Ekin(t+dt/2) version of vv */
 +                trotter_update(ir,step,ekind,enerd,state,total_vir,mdatoms,&MassQ,trotter_seq,ettTSEQ1);            
 +            }
 +
 +            /* If we are using twin-range interactions where the long-range component
 +             * is only evaluated every nstcalclr>1 steps, we should do a special update
 +             * step to combine the long-range forces on these steps.
 +             * For nstcalclr=1 this is not done, since the forces would have been added
 +             * directly to the short-range forces already.
 +             */
 +            bUpdateDoLR = (fr->bTwinRange && do_per_step(step,ir->nstcalclr));
 +            
 +            update_coords(fplog,step,ir,mdatoms,state,fr->bMolPBC,
 +                          f,bUpdateDoLR,fr->f_twin,fcd,
 +                          ekind,M,wcycle,upd,bInitStep,etrtVELOCITY1,
 +                          cr,nrnb,constr,&top->idef);
 +            
 +            if (bIterations)
 +            {
 +                gmx_iterate_init(&iterate,bIterations && !bInitStep);
 +            }
 +            /* for iterations, we save these vectors, as we will be self-consistently iterating
 +               the calculations */
 +
 +            /*#### UPDATE EXTENDED VARIABLES IN TROTTER FORMULATION */
 +            
 +            /* save the state */
 +            if (bIterations && iterate.bIterate) { 
 +                copy_coupling_state(state,bufstate,ekind,ekind_save,&(ir->opts));
 +            }
 +            
 +            bFirstIterate = TRUE;
 +            while (bFirstIterate || (bIterations && iterate.bIterate))
 +            {
 +                if (bIterations && iterate.bIterate) 
 +                {
 +                    copy_coupling_state(bufstate,state,ekind_save,ekind,&(ir->opts));
 +                    if (bFirstIterate && bTrotter) 
 +                    {
 +                        /* The first time through, we need a decent first estimate
 +                           of veta(t+dt) to compute the constraints.  Do
 +                           this by computing the box volume part of the
 +                           trotter integration at this time. Nothing else
 +                           should be changed by this routine here.  If
 +                           !(first time), we start with the previous value
 +                           of veta.  */
 +                        
 +                        veta_save = state->veta;
 +                        trotter_update(ir,step,ekind,enerd,state,total_vir,mdatoms,&MassQ,trotter_seq,ettTSEQ0);
 +                        vetanew = state->veta;
 +                        state->veta = veta_save;
 +                    } 
 +                } 
 +                
 +                bOK = TRUE;
 +                if ( !bRerunMD || rerun_fr.bV || bForceUpdate) {  /* Why is rerun_fr.bV here?  Unclear. */
 +                    dvdl = 0;
 +                    
 +                    update_constraints(fplog,step,&dvdl,ir,ekind,mdatoms,
 +                                       state,fr->bMolPBC,graph,f,
 +                                       &top->idef,shake_vir,NULL,
 +                                       cr,nrnb,wcycle,upd,constr,
 +                                       bInitStep,TRUE,bCalcVir,vetanew);
 +                    
 +                    if (!bOK && !bFFscan)
 +                    {
 +                        gmx_fatal(FARGS,"Constraint error: Shake, Lincs or Settle could not solve the constrains");
 +                    }
 +                    
 +                } 
 +                else if (graph)
 +                { /* Need to unshift here if a do_force has been
 +                     called in the previous step */
 +                    unshift_self(graph,state->box,state->x);
 +                }
 +                
 +                
 +                /* if VV, compute the pressure and constraints */
 +                /* For VV2, we strictly only need this if using pressure
 +                 * control, but we really would like to have accurate pressures
 +                 * printed out.
 +                 * Think about ways around this in the future?
 +                 * For now, keep this choice in comments.
 +                 */
 +                /*bPres = (ir->eI==eiVV || IR_NPT_TROTTER(ir)); */
 +                    /*bTemp = ((ir->eI==eiVV &&(!bInitStep)) || (ir->eI==eiVVAK && IR_NPT_TROTTER(ir)));*/
 +                bPres = TRUE;
 +                bTemp = ((ir->eI==eiVV &&(!bInitStep)) || (ir->eI==eiVVAK));
 +                if (bCalcEner && ir->eI==eiVVAK)  /*MRS:  7/9/2010 -- this still doesn't fix it?*/
 +                {
 +                    bSumEkinhOld = TRUE;
 +                }
 +                compute_globals(fplog,gstat,cr,ir,fr,ekind,state,state_global,mdatoms,nrnb,vcm,
 +                                wcycle,enerd,force_vir,shake_vir,total_vir,pres,mu_tot,
 +                                constr,NULL,FALSE,state->box,
 +                                top_global,&pcurr,top_global->natoms,&bSumEkinhOld,
 +                                cglo_flags 
 +                                | CGLO_ENERGY 
-                         update_tcouple(fplog,step,ir,state,ekind,wcycle,upd,&MassQ,mdatoms);
 +                                | (bTemp ? CGLO_TEMPERATURE:0) 
 +                                | (bPres ? CGLO_PRESSURE : 0) 
 +                                | (bPres ? CGLO_CONSTRAINT : 0)
 +                                | ((bIterations && iterate.bIterate) ? CGLO_ITERATE : 0)  
 +                                | (bFirstIterate ? CGLO_FIRSTITERATE : 0)
 +                                | CGLO_SCALEEKIN 
 +                    );
 +                /* explanation of above: 
 +                   a) We compute Ekin at the full time step
 +                   if 1) we are using the AveVel Ekin, and it's not the
 +                   initial step, or 2) if we are using AveEkin, but need the full
 +                   time step kinetic energy for the pressure (always true now, since we want accurate statistics).
 +                   b) If we are using EkinAveEkin for the kinetic energy for the temperture control, we still feed in 
 +                   EkinAveVel because it's needed for the pressure */
 +                
 +                /* temperature scaling and pressure scaling to produce the extended variables at t+dt */
 +                if (!bInitStep) 
 +                {
 +                    if (bTrotter)
 +                    {
++                        m_add(force_vir,shake_vir,total_vir); /* we need the un-dispersion corrected total vir here */
 +                        trotter_update(ir,step,ekind,enerd,state,total_vir,mdatoms,&MassQ,trotter_seq,ettTSEQ2);
 +                    } 
 +                    else 
 +                    {
 +                        if (bExchanged)
 +                        {
 +            
 +                            /* We need the kinetic energy at minus the half step for determining
 +                             * the full step kinetic energy and possibly for T-coupling.*/
 +                            /* This may not be quite working correctly yet . . . . */
 +                            compute_globals(fplog,gstat,cr,ir,fr,ekind,state,state_global,mdatoms,nrnb,vcm,
 +                                            wcycle,enerd,NULL,NULL,NULL,NULL,mu_tot,
 +                                            constr,NULL,FALSE,state->box,
 +                                            top_global,&pcurr,top_global->natoms,&bSumEkinhOld,
 +                                            CGLO_RERUNMD | CGLO_GSTAT | CGLO_TEMPERATURE);
 +                        }
-             sum_dhdl(enerd,state->lambda,ir->fepvals);
 +                    }
 +                }
 +                
 +                if (bIterations &&
 +                    done_iterating(cr,fplog,step,&iterate,bFirstIterate,
 +                                   state->veta,&vetanew)) 
 +                {
 +                    break;
 +                }
 +                bFirstIterate = FALSE;
 +            }
 +
 +            if (bTrotter && !bInitStep) {
 +                enerd->term[F_DVDL_BONDED] += dvdl;        /* only add after iterations */
 +                copy_mat(shake_vir,state->svir_prev);
 +                copy_mat(force_vir,state->fvir_prev);
 +                if (IR_NVT_TROTTER(ir) && ir->eI==eiVV) {
 +                    /* update temperature and kinetic energy now that step is over - this is the v(t+dt) point */
 +                    enerd->term[F_TEMP] = sum_ekin(&(ir->opts),ekind,NULL,(ir->eI==eiVV),FALSE,FALSE);
 +                    enerd->term[F_EKIN] = trace(ekind->ekin);
 +                }
 +            }
 +            /* if it's the initial step, we performed this first step just to get the constraint virial */
 +            if (bInitStep && ir->eI==eiVV) {
 +                copy_rvecn(cbuf,state->v,0,state->natoms);
 +            }
 +            
 +            if (fr->bSepDVDL && fplog && do_log) 
 +            {
 +                fprintf(fplog,sepdvdlformat,"Constraint",0.0,dvdl);
 +            }
 +            enerd->term[F_DVDL_BONDED] += dvdl;
 +        }
 +    
 +        /* MRS -- now done iterating -- compute the conserved quantity */
 +        if (bVV) {
 +            saved_conserved_quantity = compute_conserved_from_auxiliary(ir,state,&MassQ);
 +            if (ir->eI==eiVV) 
 +            {
 +                last_ekin = enerd->term[F_EKIN];
 +            }
 +            if ((ir->eDispCorr != edispcEnerPres) && (ir->eDispCorr != edispcAllEnerPres)) 
 +            {
 +                saved_conserved_quantity -= enerd->term[F_DISPCORR];
 +            }
 +            /* sum up the foreign energy and dhdl terms for vv.  currently done every step so that dhdl is correct in the .edr */
-         /* at the start of step, randomize the velocities */
-         if (ETC_ANDERSEN(ir->etc) && EI_VV(ir->eI))
++            if (!bRerunMD)
++            {
++                sum_dhdl(enerd,state->lambda,ir->fepvals);
++            }
 +        }
 +        
 +        /* ########  END FIRST UPDATE STEP  ############## */
 +        /* ########  If doing VV, we now have v(dt) ###### */
 +        if (bDoExpanded) {
 +            /* perform extended ensemble sampling in lambda - we don't
 +               actually move to the new state before outputting
 +               statistics, but if performing simulated tempering, we
 +               do update the velocities and the tau_t. */
 +        
 +            lamnew = ExpandedEnsembleDynamics(fplog,ir,enerd,state,&MassQ,&df_history,step,mcrng,state->v,mdatoms);
 +        }
 +        /* ################## START TRAJECTORY OUTPUT ################# */
 +        
 +        /* Now we have the energies and forces corresponding to the 
 +         * coordinates at time t. We must output all of this before
 +         * the update.
 +         * for RerunMD t is read from input trajectory
 +         */
 +        mdof_flags = 0;
 +        if (do_per_step(step,ir->nstxout)) { mdof_flags |= MDOF_X; }
 +        if (do_per_step(step,ir->nstvout)) { mdof_flags |= MDOF_V; }
 +        if (do_per_step(step,ir->nstfout)) { mdof_flags |= MDOF_F; }
 +        if (do_per_step(step,ir->nstxtcout)) { mdof_flags |= MDOF_XTC; }
 +        if (bCPT) { mdof_flags |= MDOF_CPT; };
 +
 +#if defined(GMX_FAHCORE) || defined(GMX_WRITELASTSTEP)
 +        if (bLastStep)
 +        {
 +            /* Enforce writing positions and velocities at end of run */
 +            mdof_flags |= (MDOF_X | MDOF_V);
 +        }
 +#endif
 +#ifdef GMX_FAHCORE
 +        if (MASTER(cr))
 +            fcReportProgress( ir->nsteps, step );
 +
 +        /* sync bCPT and fc record-keeping */
 +        if (bCPT && MASTER(cr))
 +            fcRequestCheckPoint();
 +#endif
 +        
 +        if (mdof_flags != 0)
 +        {
 +            wallcycle_start(wcycle,ewcTRAJ);
 +            if (bCPT)
 +            {
 +                if (state->flags & (1<<estLD_RNG))
 +                {
 +                    get_stochd_state(upd,state);
 +                }
 +                if (state->flags  & (1<<estMC_RNG))
 +                {
 +                    get_mc_state(mcrng,state);
 +                }
 +                if (MASTER(cr))
 +                {
 +                    if (bSumEkinhOld)
 +                    {
 +                        state_global->ekinstate.bUpToDate = FALSE;
 +                    }
 +                    else
 +                    {
 +                        update_ekinstate(&state_global->ekinstate,ekind);
 +                        state_global->ekinstate.bUpToDate = TRUE;
 +                    }
 +                    update_energyhistory(&state_global->enerhist,mdebin);
 +                    if (ir->efep!=efepNO || ir->bSimTemp) 
 +                    {
 +                        state_global->fep_state = state->fep_state; /* MRS: seems kludgy. The code should be
 +                                                                       structured so this isn't necessary.
 +                                                                       Note this reassignment is only necessary
 +                                                                       for single threads.*/
 +                        copy_df_history(&state_global->dfhist,&df_history);
 +                    }
 +                }
 +            }
 +            write_traj(fplog,cr,outf,mdof_flags,top_global,
 +                       step,t,state,state_global,f,f_global,&n_xtc,&x_xtc);
 +            if (bCPT)
 +            {
 +                nchkpt++;
 +                bCPT = FALSE;
 +            }
 +            debug_gmx();
 +            if (bLastStep && step_rel == ir->nsteps &&
 +                (Flags & MD_CONFOUT) && MASTER(cr) &&
 +                !bRerunMD && !bFFscan)
 +            {
 +                /* x and v have been collected in write_traj,
 +                 * because a checkpoint file will always be written
 +                 * at the last step.
 +                 */
 +                fprintf(stderr,"\nWriting final coordinates.\n");
 +                if (fr->bMolPBC)
 +                {
 +                    /* Make molecules whole only for confout writing */
 +                    do_pbc_mtop(fplog,ir->ePBC,state->box,top_global,state_global->x);
 +                }
 +                write_sto_conf_mtop(ftp2fn(efSTO,nfile,fnm),
 +                                    *top_global->name,top_global,
 +                                    state_global->x,state_global->v,
 +                                    ir->ePBC,state->box);
 +                debug_gmx();
 +            }
 +            wallcycle_stop(wcycle,ewcTRAJ);
 +        }
 +        
 +        /* kludge -- virial is lost with restart for NPT control. Must restart */
 +        if (bStartingFromCpt && bVV) 
 +        {
 +            copy_mat(state->svir_prev,shake_vir);
 +            copy_mat(state->fvir_prev,force_vir);
 +        }
 +        /*  ################## END TRAJECTORY OUTPUT ################ */
 +        
 +        /* Determine the wallclock run time up till now */
 +        run_time = gmx_gettime() - (double)runtime->real;
 +
 +        /* Check whether everything is still allright */    
 +        if (((int)gmx_get_stop_condition() > handled_stop_condition)
 +#ifdef GMX_THREAD_MPI
 +            && MASTER(cr)
 +#endif
 +            )
 +        {
 +            /* this is just make gs.sig compatible with the hack 
 +               of sending signals around by MPI_Reduce with together with
 +               other floats */
 +            if ( gmx_get_stop_condition() == gmx_stop_cond_next_ns )
 +                gs.sig[eglsSTOPCOND]=1;
 +            if ( gmx_get_stop_condition() == gmx_stop_cond_next )
 +                gs.sig[eglsSTOPCOND]=-1;
 +            /* < 0 means stop at next step, > 0 means stop at next NS step */
 +            if (fplog)
 +            {
 +                fprintf(fplog,
 +                        "\n\nReceived the %s signal, stopping at the next %sstep\n\n",
 +                        gmx_get_signal_name(),
 +                        gs.sig[eglsSTOPCOND]==1 ? "NS " : "");
 +                fflush(fplog);
 +            }
 +            fprintf(stderr,
 +                    "\n\nReceived the %s signal, stopping at the next %sstep\n\n",
 +                    gmx_get_signal_name(),
 +                    gs.sig[eglsSTOPCOND]==1 ? "NS " : "");
 +            fflush(stderr);
 +            handled_stop_condition=(int)gmx_get_stop_condition();
 +        }
 +        else if (MASTER(cr) && (bNS || ir->nstlist <= 0) &&
 +                 (max_hours > 0 && run_time > max_hours*60.0*60.0*0.99) &&
 +                 gs.sig[eglsSTOPCOND] == 0 && gs.set[eglsSTOPCOND] == 0)
 +        {
 +            /* Signal to terminate the run */
 +            gs.sig[eglsSTOPCOND] = 1;
 +            if (fplog)
 +            {
 +                fprintf(fplog,"\nStep %s: Run time exceeded %.3f hours, will terminate the run\n",gmx_step_str(step,sbuf),max_hours*0.99);
 +            }
 +            fprintf(stderr, "\nStep %s: Run time exceeded %.3f hours, will terminate the run\n",gmx_step_str(step,sbuf),max_hours*0.99);
 +        }
 +
 +        if (bResetCountersHalfMaxH && MASTER(cr) &&
 +            run_time > max_hours*60.0*60.0*0.495)
 +        {
 +            gs.sig[eglsRESETCOUNTERS] = 1;
 +        }
 +
 +        if (ir->nstlist == -1 && !bRerunMD)
 +        {
 +            /* When bGStatEveryStep=FALSE, global_stat is only called
 +             * when we check the atom displacements, not at NS steps.
 +             * This means that also the bonded interaction count check is not
 +             * performed immediately after NS. Therefore a few MD steps could
 +             * be performed with missing interactions.
 +             * But wrong energies are never written to file,
 +             * since energies are only written after global_stat
 +             * has been called.
 +             */
 +            if (step >= nlh.step_nscheck)
 +            {
 +                nlh.nabnsb = natoms_beyond_ns_buffer(ir,fr,&top->cgs,
 +                                                     nlh.scale_tot,state->x);
 +            }
 +            else
 +            {
 +                /* This is not necessarily true,
 +                 * but step_nscheck is determined quite conservatively.
 +                 */
 +                nlh.nabnsb = 0;
 +            }
 +        }
 +
 +        /* In parallel we only have to check for checkpointing in steps
 +         * where we do global communication,
 +         *  otherwise the other nodes don't know.
 +         */
 +        if (MASTER(cr) && ((bGStat || !PAR(cr)) &&
 +                           cpt_period >= 0 &&
 +                           (cpt_period == 0 || 
 +                            run_time >= nchkpt*cpt_period*60.0)) &&
 +            gs.set[eglsCHKPT] == 0)
 +        {
 +            gs.sig[eglsCHKPT] = 1;
 +        }
 +  
-             gmx_bool bDoAndersenConstr;
-             bDoAndersenConstr = (constr && update_randomize_velocities(ir,step,mdatoms,state,upd,&top->idef,constr));
-             /* if we have constraints, we have to remove the kinetic energy parallel to the bonds */
-             if (bDoAndersenConstr)
++        /* at the start of step, randomize or scale the velocities (trotter done elsewhere) */
++        if (EI_VV(ir->eI))
 +        {
-                 update_constraints(fplog,step,&dvdl,ir,ekind,mdatoms,
-                                    state,fr->bMolPBC,graph,f,
-                                    &top->idef,tmp_vir,NULL,
-                                    cr,nrnb,wcycle,upd,constr,
-                                    bInitStep,TRUE,bCalcVir,vetanew);
++            if (!bInitStep)
 +            {
-                                 | (!EI_VV(ir->eI) ? CGLO_ENERGY : 0)
++                update_tcouple(fplog,step,ir,state,ekind,wcycle,upd,&MassQ,mdatoms);
++            }
++            if (ETC_ANDERSEN(ir->etc)) /* keep this outside of update_tcouple because of the extra info required to pass */
++            {
++                gmx_bool bIfRandomize;
++                bIfRandomize = update_randomize_velocities(ir,step,mdatoms,state,upd,&top->idef,constr);
++                /* if we have constraints, we have to remove the kinetic energy parallel to the bonds */
++                if (constr && bIfRandomize)
++                {
++                    update_constraints(fplog,step,&dvdl,ir,ekind,mdatoms,
++                                       state,fr->bMolPBC,graph,f,
++                                       &top->idef,tmp_vir,NULL,
++                                       cr,nrnb,wcycle,upd,constr,
++                                       bInitStep,TRUE,bCalcVir,vetanew);
++                }
 +            }
 +        }
 +
 +        if (bIterations)
 +        {
 +            gmx_iterate_init(&iterate,bIterations);
 +        }
 +    
 +        /* for iterations, we save these vectors, as we will be redoing the calculations */
 +        if (bIterations && iterate.bIterate) 
 +        {
 +            copy_coupling_state(state,bufstate,ekind,ekind_save,&(ir->opts));
 +        }
 +        bFirstIterate = TRUE;
 +        while (bFirstIterate || (bIterations && iterate.bIterate))
 +        {
 +            /* We now restore these vectors to redo the calculation with improved extended variables */    
 +            if (bIterations) 
 +            { 
 +                copy_coupling_state(bufstate,state,ekind_save,ekind,&(ir->opts));
 +            }
 +
 +            /* We make the decision to break or not -after- the calculation of Ekin and Pressure,
 +               so scroll down for that logic */
 +            
 +            /* #########   START SECOND UPDATE STEP ################# */
 +            /* Box is changed in update() when we do pressure coupling,
 +             * but we should still use the old box for energy corrections and when
 +             * writing it to the energy file, so it matches the trajectory files for
 +             * the same timestep above. Make a copy in a separate array.
 +             */
 +            copy_mat(state->box,lastbox);
 +
 +            bOK = TRUE;
 +            if (!(bRerunMD && !rerun_fr.bV && !bForceUpdate))
 +            {
 +                wallcycle_start(wcycle,ewcUPDATE);
 +                dvdl = 0;
 +                /* UPDATE PRESSURE VARIABLES IN TROTTER FORMULATION WITH CONSTRAINTS */
 +                if (bTrotter) 
 +                {
 +                    if (bIterations && iterate.bIterate) 
 +                    {
 +                        if (bFirstIterate) 
 +                        {
 +                            scalevir = 1;
 +                        }
 +                        else 
 +                        {
 +                            /* we use a new value of scalevir to converge the iterations faster */
 +                            scalevir = tracevir/trace(shake_vir);
 +                        }
 +                        msmul(shake_vir,scalevir,shake_vir); 
 +                        m_add(force_vir,shake_vir,total_vir);
 +                        clear_mat(shake_vir);
 +                    }
 +                    trotter_update(ir,step,ekind,enerd,state,total_vir,mdatoms,&MassQ,trotter_seq,ettTSEQ3);
 +                /* We can only do Berendsen coupling after we have summed
 +                 * the kinetic energy or virial. Since the happens
 +                 * in global_state after update, we should only do it at
 +                 * step % nstlist = 1 with bGStatEveryStep=FALSE.
 +                 */
 +                }
 +                else 
 +                {
 +                    update_tcouple(fplog,step,ir,state,ekind,wcycle,upd,&MassQ,mdatoms);
 +                    update_pcouple(fplog,step,ir,state,pcoupl_mu,M,wcycle,
 +                                   upd,bInitStep);
 +                }
 +
 +                if (bVV)
 +                {
 +                    bUpdateDoLR = (fr->bTwinRange && do_per_step(step,ir->nstcalclr));
 +
 +                    /* velocity half-step update */
 +                    update_coords(fplog,step,ir,mdatoms,state,fr->bMolPBC,f,
 +                                  bUpdateDoLR,fr->f_twin,fcd,
 +                                  ekind,M,wcycle,upd,FALSE,etrtVELOCITY2,
 +                                  cr,nrnb,constr,&top->idef);
 +                }
 +
 +                /* Above, initialize just copies ekinh into ekin,
 +                 * it doesn't copy position (for VV),
 +                 * and entire integrator for MD.
 +                 */
 +                
 +                if (ir->eI==eiVVAK) 
 +                {
 +                    copy_rvecn(state->x,cbuf,0,state->natoms);
 +                }
 +                bUpdateDoLR = (fr->bTwinRange && do_per_step(step,ir->nstcalclr));
 +
 +                update_coords(fplog,step,ir,mdatoms,state,fr->bMolPBC,f,
 +                              bUpdateDoLR,fr->f_twin,fcd,
 +                              ekind,M,wcycle,upd,bInitStep,etrtPOSITION,cr,nrnb,constr,&top->idef);
 +                wallcycle_stop(wcycle,ewcUPDATE);
 +
 +                update_constraints(fplog,step,&dvdl,ir,ekind,mdatoms,state,
 +                                   fr->bMolPBC,graph,f,
 +                                   &top->idef,shake_vir,force_vir,
 +                                   cr,nrnb,wcycle,upd,constr,
 +                                   bInitStep,FALSE,bCalcVir,state->veta);  
 +                
 +                if (ir->eI==eiVVAK) 
 +                {
 +                    /* erase F_EKIN and F_TEMP here? */
 +                    /* just compute the kinetic energy at the half step to perform a trotter step */
 +                    compute_globals(fplog,gstat,cr,ir,fr,ekind,state,state_global,mdatoms,nrnb,vcm,
 +                                    wcycle,enerd,force_vir,shake_vir,total_vir,pres,mu_tot,
 +                                    constr,NULL,FALSE,lastbox,
 +                                    top_global,&pcurr,top_global->natoms,&bSumEkinhOld,
 +                                    cglo_flags | CGLO_TEMPERATURE    
 +                        );
 +                    wallcycle_start(wcycle,ewcUPDATE);
 +                    trotter_update(ir,step,ekind,enerd,state,total_vir,mdatoms,&MassQ,trotter_seq,ettTSEQ4);            
 +                    /* now we know the scaling, we can compute the positions again again */
 +                    copy_rvecn(cbuf,state->x,0,state->natoms);
 +
 +                    bUpdateDoLR = (fr->bTwinRange && do_per_step(step,ir->nstcalclr));
 +
 +                    update_coords(fplog,step,ir,mdatoms,state,fr->bMolPBC,f,
 +                                  bUpdateDoLR,fr->f_twin,fcd,
 +                                  ekind,M,wcycle,upd,bInitStep,etrtPOSITION,cr,nrnb,constr,&top->idef);
 +                    wallcycle_stop(wcycle,ewcUPDATE);
 +
 +                    /* do we need an extra constraint here? just need to copy out of state->v to upd->xp? */
 +                    /* are the small terms in the shake_vir here due
 +                     * to numerical errors, or are they important
 +                     * physically? I'm thinking they are just errors, but not completely sure. 
 +                     * For now, will call without actually constraining, constr=NULL*/
 +                    update_constraints(fplog,step,&dvdl,ir,ekind,mdatoms,
 +                                       state,fr->bMolPBC,graph,f,
 +                                       &top->idef,tmp_vir,force_vir,
 +                                       cr,nrnb,wcycle,upd,NULL,
 +                                       bInitStep,FALSE,bCalcVir,
 +                                       state->veta);  
 +                }
 +                if (!bOK && !bFFscan) 
 +                {
 +                    gmx_fatal(FARGS,"Constraint error: Shake, Lincs or Settle could not solve the constrains");
 +                }
 +                
 +                if (fr->bSepDVDL && fplog && do_log) 
 +                {
 +                    fprintf(fplog,sepdvdlformat,"Constraint dV/dl",0.0,dvdl);
 +                }
 +                enerd->term[F_DVDL_BONDED] += dvdl;
 +            } 
 +            else if (graph) 
 +            {
 +                /* Need to unshift here */
 +                unshift_self(graph,state->box,state->x);
 +            }
 +
 +            if (vsite != NULL) 
 +            {
 +                wallcycle_start(wcycle,ewcVSITECONSTR);
 +                if (graph != NULL) 
 +                {
 +                    shift_self(graph,state->box,state->x);
 +                }
 +                construct_vsites(fplog,vsite,state->x,nrnb,ir->delta_t,state->v,
 +                                 top->idef.iparams,top->idef.il,
 +                                 fr->ePBC,fr->bMolPBC,graph,cr,state->box);
 +                
 +                if (graph != NULL) 
 +                {
 +                    unshift_self(graph,state->box,state->x);
 +                }
 +                wallcycle_stop(wcycle,ewcVSITECONSTR);
 +            }
 +            
 +            /* ############## IF NOT VV, Calculate globals HERE, also iterate constraints ############ */
 +            /* With Leap-Frog we can skip compute_globals at
 +             * non-communication steps, but we need to calculate
 +             * the kinetic energy one step before communication.
 +             */
 +            if (bGStat || do_per_step(step+1,nstglobalcomm) ||
 +                EI_VV(ir->eI))
 +            {
 +                if (ir->nstlist == -1 && bFirstIterate)
 +                {
 +                    gs.sig[eglsNABNSB] = nlh.nabnsb;
 +                }
 +                compute_globals(fplog,gstat,cr,ir,fr,ekind,state,state_global,mdatoms,nrnb,vcm,
 +                                wcycle,enerd,force_vir,shake_vir,total_vir,pres,mu_tot,
 +                                constr,
 +                                bFirstIterate ? &gs : NULL, 
 +                                (step_rel % gs.nstms == 0) && 
 +                                (multisim_nsteps<0 || (step_rel<multisim_nsteps)),
 +                                lastbox,
 +                                top_global,&pcurr,top_global->natoms,&bSumEkinhOld,
 +                                cglo_flags 
-         if (!bVV)
++                                | (!EI_VV(ir->eI) || bRerunMD ? CGLO_ENERGY : 0)
 +                                | (!EI_VV(ir->eI) && bStopCM ? CGLO_STOPCM : 0)
 +                                | (!EI_VV(ir->eI) ? CGLO_TEMPERATURE : 0) 
 +                                | (!EI_VV(ir->eI) || bRerunMD ? CGLO_PRESSURE : 0) 
 +                                | (bIterations && iterate.bIterate ? CGLO_ITERATE : 0) 
 +                                | (bFirstIterate ? CGLO_FIRSTITERATE : 0)
 +                                | CGLO_CONSTRAINT 
 +                    );
 +                if (ir->nstlist == -1 && bFirstIterate)
 +                {
 +                    nlh.nabnsb = gs.set[eglsNABNSB];
 +                    gs.set[eglsNABNSB] = 0;
 +                }
 +            }
 +            /* bIterate is set to keep it from eliminating the old ekin kinetic energy terms */
 +            /* #############  END CALC EKIN AND PRESSURE ################# */
 +        
 +            /* Note: this is OK, but there are some numerical precision issues with using the convergence of
 +               the virial that should probably be addressed eventually. state->veta has better properies,
 +               but what we actually need entering the new cycle is the new shake_vir value. Ideally, we could
 +               generate the new shake_vir, but test the veta value for convergence.  This will take some thought. */
 +
 +            if (bIterations && 
 +                done_iterating(cr,fplog,step,&iterate,bFirstIterate,
 +                               trace(shake_vir),&tracevir)) 
 +            {
 +                break;
 +            }
 +            bFirstIterate = FALSE;
 +        }
 +
 +        /* only add constraint dvdl after constraints */
 +        enerd->term[F_DVDL_BONDED] += dvdl;
-                 state->lambda[i] = ir->fepvals->all_lambda[i][lamnew];
++        if (!bVV || bRerunMD)
 +        {
 +            /* sum up the foreign energy and dhdl terms for md and sd. currently done every step so that dhdl is correct in the .edr */
 +            sum_dhdl(enerd,state->lambda,ir->fepvals);
 +        }
 +        update_box(fplog,step,ir,mdatoms,state,graph,f,
 +                   ir->nstlist==-1 ? &nlh.scale_tot : NULL,pcoupl_mu,nrnb,wcycle,upd,bInitStep,FALSE);
 +        
 +        /* ################# END UPDATE STEP 2 ################# */
 +        /* #### We now have r(t+dt) and v(t+dt/2)  ############# */
 +    
 +        /* The coordinates (x) were unshifted in update */
 +        if (bFFscan && (shellfc==NULL || bConverged))
 +        {
 +            if (print_forcefield(fplog,enerd->term,mdatoms->homenr,
 +                                 f,NULL,xcopy,
 +                                 &(top_global->mols),mdatoms->massT,pres))
 +            {
 +                gmx_finalize_par();
 +
 +                fprintf(stderr,"\n");
 +                exit(0);
 +            }
 +        }
 +        if (!bGStat)
 +        {
 +            /* We will not sum ekinh_old,                                                            
 +             * so signal that we still have to do it.                                                
 +             */
 +            bSumEkinhOld = TRUE;
 +        }
 +        
 +        if (bTCR)
 +        {
 +            /* Only do GCT when the relaxation of shells (minimization) has converged,
 +             * otherwise we might be coupling to bogus energies. 
 +             * In parallel we must always do this, because the other sims might
 +             * update the FF.
 +             */
 +
 +            /* Since this is called with the new coordinates state->x, I assume
 +             * we want the new box state->box too. / EL 20040121
 +             */
 +            do_coupling(fplog,oenv,nfile,fnm,tcr,t,step,enerd->term,fr,
 +                        ir,MASTER(cr),
 +                        mdatoms,&(top->idef),mu_aver,
 +                        top_global->mols.nr,cr,
 +                        state->box,total_vir,pres,
 +                        mu_tot,state->x,f,bConverged);
 +            debug_gmx();
 +        }
 +
 +        /* #########  BEGIN PREPARING EDR OUTPUT  ###########  */
 +        
 +        /* use the directly determined last velocity, not actually the averaged half steps */
 +        if (bTrotter && ir->eI==eiVV) 
 +        {
 +            enerd->term[F_EKIN] = last_ekin;
 +        }
 +        enerd->term[F_ETOT] = enerd->term[F_EPOT] + enerd->term[F_EKIN];
 +        
 +        if (bVV)
 +        {
 +            enerd->term[F_ECONSERVED] = enerd->term[F_ETOT] + saved_conserved_quantity;
 +        }
 +        else 
 +        {
 +            enerd->term[F_ECONSERVED] = enerd->term[F_ETOT] + compute_conserved_from_auxiliary(ir,state,&MassQ);
 +        }
 +        /* Check for excessively large energies */
 +        if (bIonize) 
 +        {
 +#ifdef GMX_DOUBLE
 +            real etot_max = 1e200;
 +#else
 +            real etot_max = 1e30;
 +#endif
 +            if (fabs(enerd->term[F_ETOT]) > etot_max) 
 +            {
 +                fprintf(stderr,"Energy too large (%g), giving up\n",
 +                        enerd->term[F_ETOT]);
 +            }
 +        }
 +        /* #########  END PREPARING EDR OUTPUT  ###########  */
 +        
 +        /* Time for performance */
 +        if (((step % stepout) == 0) || bLastStep) 
 +        {
 +            runtime_upd_proc(runtime);
 +        }
 +        
 +        /* Output stuff */
 +        if (MASTER(cr))
 +        {
 +            gmx_bool do_dr,do_or;
 +            
 +            if (fplog && do_log && bDoExpanded)
 +            {
 +                /* only needed if doing expanded ensemble */
 +                PrintFreeEnergyInfoToFile(fplog,ir->fepvals,ir->expandedvals,ir->bSimTemp?ir->simtempvals:NULL,
 +                                          &df_history,state->fep_state,ir->nstlog,step);
 +            }
 +            if (!(bStartingFromCpt && (EI_VV(ir->eI)))) 
 +            {
 +                if (bCalcEner)
 +                {
 +                    upd_mdebin(mdebin,bDoDHDL, TRUE,
 +                               t,mdatoms->tmass,enerd,state,
 +                               ir->fepvals,ir->expandedvals,lastbox,
 +                               shake_vir,force_vir,total_vir,pres,
 +                               ekind,mu_tot,constr);
 +                }
 +                else
 +                {
 +                    upd_mdebin_step(mdebin);
 +                }
 +                
 +                do_dr  = do_per_step(step,ir->nstdisreout);
 +                do_or  = do_per_step(step,ir->nstorireout);
 +                
 +                print_ebin(outf->fp_ene,do_ene,do_dr,do_or,do_log?fplog:NULL,
 +                           step,t,
 +                           eprNORMAL,bCompact,mdebin,fcd,groups,&(ir->opts));
 +            }
 +            if (ir->ePull != epullNO)
 +            {
 +                pull_print_output(ir->pull,step,t);
 +            }
 +            
 +            if (do_per_step(step,ir->nstlog))
 +            {
 +                if(fflush(fplog) != 0)
 +                {
 +                    gmx_fatal(FARGS,"Cannot flush logfile - maybe you are out of disk space?");
 +                }
 +            }
 +        }
 +        if (bDoExpanded)
 +        {
 +            /* Have to do this part after outputting the logfile and the edr file */
 +            state->fep_state = lamnew;
 +            for (i=0;i<efptNR;i++)
 +            {
++                state_global->lambda[i] = ir->fepvals->all_lambda[i][lamnew];
 +            }
 +        }
 +        /* Remaining runtime */
 +        if (MULTIMASTER(cr) && (do_verbose || gmx_got_usr_signal()) && !bPMETuneRunning)
 +        {
 +            if (shellfc) 
 +            {
 +                fprintf(stderr,"\n");
 +            }
 +            print_time(stderr,runtime,step,ir,cr);
 +        }
 +
 +        /* Replica exchange */
 +        bExchanged = FALSE;
 +        if ((repl_ex_nst > 0) && (step > 0) && !bLastStep &&
 +            do_per_step(step,repl_ex_nst)) 
 +        {
 +            bExchanged = replica_exchange(fplog,cr,repl_ex,
 +                                          state_global,enerd,
 +                                          state,step,t);
 +
 +            if (bExchanged && DOMAINDECOMP(cr)) 
 +            {
 +                dd_partition_system(fplog,step,cr,TRUE,1,
 +                                    state_global,top_global,ir,
 +                                    state,&f,mdatoms,top,fr,
 +                                    vsite,shellfc,constr,
 +                                    nrnb,wcycle,FALSE);
 +            }
 +        }
 +        
 +        bFirstStep = FALSE;
 +        bInitStep = FALSE;
 +        bStartingFromCpt = FALSE;
 +
 +        /* #######  SET VARIABLES FOR NEXT ITERATION IF THEY STILL NEED IT ###### */
 +        /* With all integrators, except VV, we need to retain the pressure
 +         * at the current step for coupling at the next step.
 +         */
 +        if ((state->flags & (1<<estPRES_PREV)) &&
 +            (bGStatEveryStep ||
 +             (ir->nstpcouple > 0 && step % ir->nstpcouple == 0)))
 +        {
 +            /* Store the pressure in t_state for pressure coupling
 +             * at the next MD step.
 +             */
 +            copy_mat(pres,state->pres_prev);
 +        }
 +        
 +        /* #######  END SET VARIABLES FOR NEXT ITERATION ###### */
 +
 +        if ( (membed!=NULL) && (!bLastStep) )
 +        {
 +            rescale_membed(step_rel,membed,state_global->x);
 +        }
 +
 +        if (bRerunMD) 
 +        {
 +            if (MASTER(cr))
 +            {
 +                /* read next frame from input trajectory */
 +                bNotLastFrame = read_next_frame(oenv,status,&rerun_fr);
 +            }
 +
 +            if (PAR(cr))
 +            {
 +                rerun_parallel_comm(cr,&rerun_fr,&bNotLastFrame);
 +            }
 +        }
 +        
 +        if (!bRerunMD || !rerun_fr.bStep)
 +        {
 +            /* increase the MD step number */
 +            step++;
 +            step_rel++;
 +        }
 +        
 +        cycles = wallcycle_stop(wcycle,ewcSTEP);
 +        if (DOMAINDECOMP(cr) && wcycle)
 +        {
 +            dd_cycles_add(cr->dd,cycles,ddCyclStep);
 +        }
 +
 +        if (bPMETuneRunning || bPMETuneTry)
 +        {
 +            /* PME grid + cut-off optimization with GPUs or PME nodes */
 +
 +            /* Count the total cycles over the last steps */
 +            cycles_pmes += cycles;
 +
 +            /* We can only switch cut-off at NS steps */
 +            if (step % ir->nstlist == 0)
 +            {
 +                /* PME grid + cut-off optimization with GPUs or PME nodes */
 +                if (bPMETuneTry)
 +                {
 +                    if (DDMASTER(cr->dd))
 +                    {
 +                        /* PME node load is too high, start tuning */
 +                        bPMETuneRunning = (dd_pme_f_ratio(cr->dd) >= 1.05);
 +                    }
 +                    dd_bcast(cr->dd,sizeof(gmx_bool),&bPMETuneRunning);
 +
 +                    if (bPMETuneRunning || step_rel > ir->nstlist*50)
 +                    {
 +                        bPMETuneTry     = FALSE;
 +                    }
 +                }
 +                if (bPMETuneRunning)
 +                {
 +                    /* init_step might not be a multiple of nstlist,
 +                     * but the first cycle is always skipped anyhow.
 +                     */
 +                    bPMETuneRunning =
 +                        pme_load_balance(pme_loadbal,cr,
 +                                         (bVerbose && MASTER(cr)) ? stderr : NULL,
 +                                         fplog,
 +                                         ir,state,cycles_pmes,
 +                                         fr->ic,fr->nbv,&fr->pmedata,
 +                                         step);
 +
 +                    /* Update constants in forcerec/inputrec to keep them in sync with fr->ic */
 +                    fr->ewaldcoeff = fr->ic->ewaldcoeff;
 +                    fr->rlist      = fr->ic->rlist;
 +                    fr->rlistlong  = fr->ic->rlistlong;
 +                    fr->rcoulomb   = fr->ic->rcoulomb;
 +                    fr->rvdw       = fr->ic->rvdw;
 +                }
 +                cycles_pmes = 0;
 +            }
 +        }
 +
 +        if (step_rel == wcycle_get_reset_counters(wcycle) ||
 +            gs.set[eglsRESETCOUNTERS] != 0)
 +        {
 +            /* Reset all the counters related to performance over the run */
 +            reset_all_counters(fplog,cr,step,&step_rel,ir,wcycle,nrnb,runtime,
 +                               fr->nbv != NULL && fr->nbv->bUseGPU ? fr->nbv->cu_nbv : NULL);
 +            wcycle_set_reset_counters(wcycle,-1);
 +            /* Correct max_hours for the elapsed time */
 +            max_hours -= run_time/(60.0*60.0);
 +            bResetCountersHalfMaxH = FALSE;
 +            gs.set[eglsRESETCOUNTERS] = 0;
 +        }
 +
 +    }
 +    /* End of main MD loop */
 +    debug_gmx();
 +    
 +    /* Stop the time */
 +    runtime_end(runtime);
 +    
 +    if (bRerunMD && MASTER(cr))
 +    {
 +        close_trj(status);
 +    }
 +    
 +    if (!(cr->duty & DUTY_PME))
 +    {
 +        /* Tell the PME only node to finish */
 +        gmx_pme_send_finish(cr);
 +    }
 +    
 +    if (MASTER(cr))
 +    {
 +        if (ir->nstcalcenergy > 0 && !bRerunMD) 
 +        {
 +            print_ebin(outf->fp_ene,FALSE,FALSE,FALSE,fplog,step,t,
 +                       eprAVER,FALSE,mdebin,fcd,groups,&(ir->opts));
 +        }
 +    }
 +
 +    done_mdoutf(outf);
 +
 +    debug_gmx();
 +
 +    if (ir->nstlist == -1 && nlh.nns > 0 && fplog)
 +    {
 +        fprintf(fplog,"Average neighborlist lifetime: %.1f steps, std.dev.: %.1f steps\n",nlh.s1/nlh.nns,sqrt(nlh.s2/nlh.nns - sqr(nlh.s1/nlh.nns)));
 +        fprintf(fplog,"Average number of atoms that crossed the half buffer length: %.1f\n\n",nlh.ab/nlh.nns);
 +    }
 +
 +    if (pme_loadbal != NULL)
 +    {
 +        pme_loadbal_done(pme_loadbal,fplog);
 +    }
 +
 +    if (shellfc && fplog)
 +    {
 +        fprintf(fplog,"Fraction of iterations that converged:           %.2f %%\n",
 +                (nconverged*100.0)/step_rel);
 +        fprintf(fplog,"Average number of force evaluations per MD step: %.2f\n\n",
 +                tcount/step_rel);
 +    }
 +    
 +    if (repl_ex_nst > 0 && MASTER(cr))
 +    {
 +        print_replica_exchange_statistics(fplog,repl_ex);
 +    }
 +    
 +    runtime->nsteps_done = step_rel;
 +
 +   return 0;
 +}
index 0f24b8878afad8c850ab0414b749efe2a8f1fbfb,0000000000000000000000000000000000000000..43f9d0112c5ccda13a191df842213a561f784b03
mode 100644,000000..100644
--- /dev/null
@@@ -1,808 -1,0 +1,808 @@@
-     "menu of the WHAT IF program. [TT]mdrun[tt] produces a [TT].edo[tt] file that",
 +/*  -*- mode: c; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4; c-file-style: "stroustrup"; -*-
 + *
 + * 
 + *                This source code is part of
 + * 
 + *                 G   R   O   M   A   C   S
 + * 
 + *          GROningen MAchine for Chemical Simulations
 + * 
 + *                        VERSION 3.2.0
 + * Written by David van der Spoel, Erik Lindahl, Berk Hess, and others.
 + * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 + * Copyright (c) 2001-2004, The GROMACS development team,
 + * check out http://www.gromacs.org for more information.
 +
 + * This program is free software; you can redistribute it and/or
 + * modify it under the terms of the GNU General Public License
 + * as published by the Free Software Foundation; either version 2
 + * of the License, or (at your option) any later version.
 + * 
 + * If you want to redistribute modifications, please consider that
 + * scientific software is very special. Version control is crucial -
 + * bugs must be traceable. We will be happy to consider code for
 + * inclusion in the official distribution, but derived work must not
 + * be called official GROMACS. Details are found in the README & COPYING
 + * files - if they are missing, get the official version at www.gromacs.org.
 + * 
 + * To help us fund GROMACS development, we humbly ask that you cite
 + * the papers on the package - you can find them in the top README file.
 + * 
 + * For more info, check our website at http://www.gromacs.org
 + * 
 + * And Hey:
 + * Gallium Rubidium Oxygen Manganese Argon Carbon Silicon
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include "typedefs.h"
 +#include "macros.h"
 +#include "copyrite.h"
 +#include "main.h"
 +#include "statutil.h"
 +#include "smalloc.h"
 +#include "futil.h"
 +#include "smalloc.h"
 +#include "edsam.h"
 +#include "mdrun.h"
 +#include "xmdrun.h"
 +#include "checkpoint.h"
 +#ifdef GMX_THREAD_MPI
 +#include "thread_mpi.h"
 +#endif
 +
 +/* afm stuf */
 +#include "pull.h"
 +
 +int cmain(int argc,char *argv[])
 +{
 +  const char *desc[] = {
 + #ifdef GMX_OPENMM
 +    "This is an experimental release of GROMACS for accelerated",
 +      "Molecular Dynamics simulations on GPU processors. Support is provided",
 +      "by the OpenMM library (https://simtk.org/home/openmm).[PAR]",
 +      "*Warning*[BR]",
 +      "This release is targeted at developers and advanced users and",
 +      "care should be taken before production use. The following should be",
 +      "noted before using the program:[PAR]",
 +      " * The current release runs only on modern nVidia GPU hardware with CUDA support.",
 +      "Make sure that the necessary CUDA drivers and libraries for your operating system",
 +      "are already installed. The CUDA SDK also should be installed in order to compile",
 +      "the program from source (http://www.nvidia.com/object/cuda_home.html).[PAR]",
 +      " * Multiple GPU cards are not supported.[PAR]",
 +      " * Only a small subset of the GROMACS features and options are supported on the GPUs.",
 +      "See below for a detailed list.[PAR]",
 +      " * Consumer level GPU cards are known to often have problems with faulty memory.",
 +      "It is recommended that a full memory check of the cards is done at least once",
 +      "(for example, using the memtest=full option).",
 +      "A partial memory check (for example, memtest=15) before and",
 +      "after the simulation run would help spot",
 +      "problems resulting from processor overheating.[PAR]",
 +      " * The maximum size of the simulated systems depends on the available",
 +      "GPU memory,for example, a GTX280 with 1GB memory has been tested with systems",
 +      "of up to about 100,000 atoms.[PAR]",
 +      " * In order to take a full advantage of the GPU platform features, many algorithms",
 +      "have been implemented in a very different way than they are on the CPUs.",
 +      "Therefore numercal correspondence between properties of the state of",
 +      "simulated systems should not be expected. Moreover, the values will likely vary",
 +      "when simulations are done on different GPU hardware.[PAR]",
 +      " * Frequent retrieval of system state information such as",
 +      "trajectory coordinates and energies can greatly influence the performance",
 +      "of the program due to slow CPU<->GPU memory transfer speed.[PAR]",
 +      " * MD algorithms are complex, and although the Gromacs code is highly tuned for them,",
 +      "they often do not translate very well onto the streaming architetures.",
 +      "Realistic expectations about the achievable speed-up from test with GTX280:",
 +      "For small protein systems in implicit solvent using all-vs-all kernels the acceleration",
 +      "can be as high as 20 times, but in most other setups involving cutoffs and PME the",
 +      "acceleration is usually only ~4 times relative to a 3GHz CPU.[PAR]",
 +      "Supported features:[PAR]",
 +      " * Integrators: md/md-vv/md-vv-avek, sd/sd1 and bd.\n",
 +      " * Long-range interactions (option coulombtype): Reaction-Field, Ewald, PME, and cut-off (for Implicit Solvent only)\n",
 +      " * Temperature control: Supported only with the md/md-vv/md-vv-avek, sd/sd1 and bd integrators.\n",
 +      " * Pressure control: Supported.\n",
 +      " * Implicit solvent: Supported.\n",
 +      "A detailed description can be found on the GROMACS website:\n",
 +      "http://www.gromacs.org/gpu[PAR]",
 +/* From the original mdrun documentaion */
 +    "The [TT]mdrun[tt] program reads the run input file ([TT]-s[tt])",
 +    "and distributes the topology over nodes if needed.",
 +    "[TT]mdrun[tt] produces at least four output files.",
 +    "A single log file ([TT]-g[tt]) is written, unless the option",
 +    "[TT]-seppot[tt] is used, in which case each node writes a log file.",
 +    "The trajectory file ([TT]-o[tt]), contains coordinates, velocities and",
 +    "optionally forces.",
 +    "The structure file ([TT]-c[tt]) contains the coordinates and",
 +    "velocities of the last step.",
 +    "The energy file ([TT]-e[tt]) contains energies, the temperature,",
 +    "pressure, etc, a lot of these things are also printed in the log file.",
 +    "Optionally coordinates can be written to a compressed trajectory file",
 +    "([TT]-x[tt]).[PAR]",
 +/* openmm specific information */
 +      "Usage with OpenMM:[BR]",
 +      "[TT]mdrun -device \"OpenMM:platform=Cuda,memtest=15,deviceid=0,force-device=no\"[tt][PAR]",
 +      "Options:[PAR]",
 +      "      [TT]platform[tt] = Cuda\t\t:\tThe only available value. OpenCL support will be available in future.\n",
 +      "      [TT]memtest[tt] = 15\t\t:\tRun a partial, random GPU memory test for the given amount of seconds. A full test",
 +      "(recommended!) can be run with \"memtest=full\". Memory testing can be disabled with \"memtest=off\".\n",
 +      "      [TT]deviceid[tt] = 0\t\t:\tSpecify the target device when multiple cards are present.",
 +      "Only one card can be used at any given time though.\n",
 +      "      [TT]force-device[tt] = no\t\t:\tIf set to \"yes\" [TT]mdrun[tt]  will be forced to execute on",
 +      "hardware that is not officially supported. GPU acceleration can also be achieved on older",
 +      "but Cuda capable cards, although the simulation might be too slow, and the memory limits too strict.",
 +#else
 +    "The [TT]mdrun[tt] program is the main computational chemistry engine",
 +    "within GROMACS. Obviously, it performs Molecular Dynamics simulations,",
 +    "but it can also perform Stochastic Dynamics, Energy Minimization,",
 +    "test particle insertion or (re)calculation of energies.",
 +    "Normal mode analysis is another option. In this case [TT]mdrun[tt]",
 +    "builds a Hessian matrix from single conformation.",
 +    "For usual Normal Modes-like calculations, make sure that",
 +    "the structure provided is properly energy-minimized.",
 +    "The generated matrix can be diagonalized by [TT]g_nmeig[tt].[PAR]",
 +    "The [TT]mdrun[tt] program reads the run input file ([TT]-s[tt])",
 +    "and distributes the topology over nodes if needed.",
 +    "[TT]mdrun[tt] produces at least four output files.",
 +    "A single log file ([TT]-g[tt]) is written, unless the option",
 +    "[TT]-seppot[tt] is used, in which case each node writes a log file.",
 +    "The trajectory file ([TT]-o[tt]), contains coordinates, velocities and",
 +    "optionally forces.",
 +    "The structure file ([TT]-c[tt]) contains the coordinates and",
 +    "velocities of the last step.",
 +    "The energy file ([TT]-e[tt]) contains energies, the temperature,",
 +    "pressure, etc, a lot of these things are also printed in the log file.",
 +    "Optionally coordinates can be written to a compressed trajectory file",
 +    "([TT]-x[tt]).[PAR]",
 +    "The option [TT]-dhdl[tt] is only used when free energy calculation is",
 +    "turned on.[PAR]",
 +    "A simulation can be run in parallel using two different parallelization",
 +    "schemes: MPI parallelization and/or OpenMP thread parallelization.",
 +    "The MPI parallelization uses multiple processes when [TT]mdrun[tt] is",
 +    "compiled with a normal MPI library or threads when [TT]mdrun[tt] is",
 +    "compiled with the GROMACS built-in thread-MPI library. OpenMP threads",
 +    "are supported when mdrun is compiled with OpenMP. Full OpenMP support",
 +    "is only available with the Verlet cut-off scheme, with the (older)",
 +    "group scheme only PME-only processes can use OpenMP parallelization.",
 +    "In all cases [TT]mdrun[tt] will by default try to use all the available",
 +    "hardware resources. With a normal MPI library only the options",
 +    "[TT]-ntomp[tt] (with the Verlet cut-off scheme) and [TT]-ntomp_pme[tt],",
 +    "for PME-only processes, can be used to control the number of threads.",
 +    "With thread-MPI there are additional options [TT]-nt[tt], which sets",
 +    "the total number of threads, and [TT]-ntmpi[tt], which sets the number",
 +    "of thread-MPI threads.",
 +    "Note that using combined MPI+OpenMP parallelization is almost always",
 +    "slower than single parallelization, except at the scaling limit, where",
 +    "especially OpenMP parallelization of PME reduces the communication cost.",
 +    "OpenMP-only parallelization is much faster than MPI-only parallelization",
 +    "on a single CPU(-die). Since we currently don't have proper hardware",
 +    "topology detection, [TT]mdrun[tt] compiled with thread-MPI will only",
 +    "automatically use OpenMP-only parallelization when you use up to 4",
 +    "threads, up to 12 threads with Intel Nehalem/Westmere, or up to 16",
 +    "threads with Intel Sandy Bridge or newer CPUs. Otherwise MPI-only",
 +    "parallelization is used (except with GPUs, see below).",
 +    "[PAR]",
 +    "To quickly test the performance of the new Verlet cut-off scheme",
 +    "with old [TT].tpr[tt] files, either on CPUs or CPUs+GPUs, you can use",
 +    "the [TT]-testverlet[tt] option. This should not be used for production,",
 +    "since it can slightly modify potentials and it will remove charge groups",
 +    "making analysis difficult, as the [TT].tpr[tt] file will still contain",
 +    "charge groups. For production simulations it is highly recommended",
 +    "to specify [TT]cutoff-scheme = Verlet[tt] in the [TT].mdp[tt] file.",
 +    "[PAR]",
 +    "With GPUs (only supported with the Verlet cut-off scheme), the number",
 +    "of GPUs should match the number of MPI processes or MPI threads,",
 +    "excluding PME-only processes/threads. With thread-MPI the number",
 +    "of MPI threads will automatically be set to the number of GPUs detected.",
 +    "When you want to use a subset of the available GPUs, you can use",
 +    "the [TT]-gpu_id[tt] option, where GPU id's are passed as a string,",
 +    "e.g. 02 for using GPUs 0 and 2. When you want different GPU id's",
 +    "on different nodes of a compute cluster, use the GMX_GPU_ID environment",
 +    "variable instead. The format for GMX_GPU_ID is identical to ",
 +    "[TT]-gpu_id[tt], but an environment variable can have different values",
 +    "on different nodes of a cluster.",
 +    "[PAR]",
 +    "When using PME with separate PME nodes or with a GPU, the two major",
 +    "compute tasks, the non-bonded force calculation and the PME calculation",
 +    "run on different compute resources. If this load is not balanced,",
 +    "some of the resources will be idle part of time. With the Verlet",
 +    "cut-off scheme this load is automatically balanced when the PME load",
 +    "is too high (but not when it is too low). This is done by scaling",
 +    "the Coulomb cut-off and PME grid spacing by the same amount. In the first",
 +    "few hundred steps different settings are tried and the fastest is chosen",
 +    "for the rest of the simulation. This does not affect the accuracy of",
 +    "the results, but it does affect the decomposition of the Coulomb energy",
 +    "into particle and mesh contributions. The auto-tuning can be turned off",
 +    "with the option [TT]-notunepme[tt].",
 +    "[PAR]",
 +    "When compiled with OpenMP on Linux, [TT]mdrun[tt] pins threads to cores,",
 +    "as this usually results in significantly better performance.",
 +    "If you don't want this, use [TT]-nopin[tt].",
 +    "With Intel CPUs with hyper-threading enabled, you should pin",
 +    "consecutive threads to the same physical core for optimal",
 +    "performance when you use virtual cores. This is done automatically",
 +    "when you use more than half of the virtual cores. It can also be set",
 +    "manually with [TT]-pinht[tt], e.g. for running multiple simulations",
 +    "on one compute node.",
 +    "When running multiple mdrun (or other) simulations on the same physical",
 +    "node, some simulations need to start pinning from a non-zero core",
 +    "to avoid overloading cores; with [TT]-pinoffset[tt] you can specify",
 +    "the offset in (physical) cores for pinning.",
 +    "[PAR]",
 +    "When [TT]mdrun[tt] is started using MPI with more than 1 process",
 +    "or with thread-MPI with more than 1 thread, MPI parallelization is used.",
 +    "By default domain decomposition is used, unless the [TT]-pd[tt]",
 +    "option is set, which selects particle decomposition.",
 +    "[PAR]",
 +    "With domain decomposition, the spatial decomposition can be set",
 +    "with option [TT]-dd[tt]. By default [TT]mdrun[tt] selects a good decomposition.",
 +    "The user only needs to change this when the system is very inhomogeneous.",
 +    "Dynamic load balancing is set with the option [TT]-dlb[tt],",
 +    "which can give a significant performance improvement,",
 +    "especially for inhomogeneous systems. The only disadvantage of",
 +    "dynamic load balancing is that runs are no longer binary reproducible,",
 +    "but in most cases this is not important.",
 +    "By default the dynamic load balancing is automatically turned on",
 +    "when the measured performance loss due to load imbalance is 5% or more.",
 +    "At low parallelization these are the only important options",
 +    "for domain decomposition.",
 +    "At high parallelization the options in the next two sections",
 +    "could be important for increasing the performace.",
 +    "[PAR]",
 +    "When PME is used with domain decomposition, separate nodes can",
 +    "be assigned to do only the PME mesh calculation;",
 +    "this is computationally more efficient starting at about 12 nodes.",
 +    "The number of PME nodes is set with option [TT]-npme[tt],",
 +    "this can not be more than half of the nodes.",
 +    "By default [TT]mdrun[tt] makes a guess for the number of PME",
 +    "nodes when the number of nodes is larger than 11 or performance wise",
 +    "not compatible with the PME grid x dimension.",
 +    "But the user should optimize npme. Performance statistics on this issue",
 +    "are written at the end of the log file.",
 +    "For good load balancing at high parallelization, the PME grid x and y",
 +    "dimensions should be divisible by the number of PME nodes",
 +    "(the simulation will run correctly also when this is not the case).",
 +    "[PAR]",
 +    "This section lists all options that affect the domain decomposition.",
 +    "[PAR]",
 +    "Option [TT]-rdd[tt] can be used to set the required maximum distance",
 +    "for inter charge-group bonded interactions.",
 +    "Communication for two-body bonded interactions below the non-bonded",
 +    "cut-off distance always comes for free with the non-bonded communication.",
 +    "Atoms beyond the non-bonded cut-off are only communicated when they have",
 +    "missing bonded interactions; this means that the extra cost is minor",
 +    "and nearly indepedent of the value of [TT]-rdd[tt].",
 +    "With dynamic load balancing option [TT]-rdd[tt] also sets",
 +    "the lower limit for the domain decomposition cell sizes.",
 +    "By default [TT]-rdd[tt] is determined by [TT]mdrun[tt] based on",
 +    "the initial coordinates. The chosen value will be a balance",
 +    "between interaction range and communication cost.",
 +    "[PAR]",
 +    "When inter charge-group bonded interactions are beyond",
 +    "the bonded cut-off distance, [TT]mdrun[tt] terminates with an error message.",
 +    "For pair interactions and tabulated bonds",
 +    "that do not generate exclusions, this check can be turned off",
 +    "with the option [TT]-noddcheck[tt].",
 +    "[PAR]",
 +    "When constraints are present, option [TT]-rcon[tt] influences",
 +    "the cell size limit as well.",
 +    "Atoms connected by NC constraints, where NC is the LINCS order plus 1,",
 +    "should not be beyond the smallest cell size. A error message is",
 +    "generated when this happens and the user should change the decomposition",
 +    "or decrease the LINCS order and increase the number of LINCS iterations.",
 +    "By default [TT]mdrun[tt] estimates the minimum cell size required for P-LINCS",
 +    "in a conservative fashion. For high parallelization it can be useful",
 +    "to set the distance required for P-LINCS with the option [TT]-rcon[tt].",
 +    "[PAR]",
 +    "The [TT]-dds[tt] option sets the minimum allowed x, y and/or z scaling",
 +    "of the cells with dynamic load balancing. [TT]mdrun[tt] will ensure that",
 +    "the cells can scale down by at least this factor. This option is used",
 +    "for the automated spatial decomposition (when not using [TT]-dd[tt])",
 +    "as well as for determining the number of grid pulses, which in turn",
 +    "sets the minimum allowed cell size. Under certain circumstances",
 +    "the value of [TT]-dds[tt] might need to be adjusted to account for",
 +    "high or low spatial inhomogeneity of the system.",
 +    "[PAR]",
 +    "The option [TT]-gcom[tt] can be used to only do global communication",
 +    "every n steps.",
 +    "This can improve performance for highly parallel simulations",
 +    "where this global communication step becomes the bottleneck.",
 +    "For a global thermostat and/or barostat the temperature",
 +    "and/or pressure will also only be updated every [TT]-gcom[tt] steps.",
 +    "By default it is set to the minimum of nstcalcenergy and nstlist.[PAR]",
 +    "With [TT]-rerun[tt] an input trajectory can be given for which ",
 +    "forces and energies will be (re)calculated. Neighbor searching will be",
 +    "performed for every frame, unless [TT]nstlist[tt] is zero",
 +    "(see the [TT].mdp[tt] file).[PAR]",
 +    "ED (essential dynamics) sampling is switched on by using the [TT]-ei[tt]",
 +    "flag followed by an [TT].edi[tt] file.",
 +    "The [TT].edi[tt] file can be produced using options in the essdyn",
-     { efEDO, "-eo",     "sam",      ffOPTWR },
++    "menu of the WHAT IF program. [TT]mdrun[tt] produces a [TT].xvg[tt] output file that",
 +    "contains projections of positions, velocities and forces onto selected",
 +    "eigenvectors.[PAR]",
 +    "When user-defined potential functions have been selected in the",
 +    "[TT].mdp[tt] file the [TT]-table[tt] option is used to pass [TT]mdrun[tt]",
 +    "a formatted table with potential functions. The file is read from",
 +    "either the current directory or from the [TT]GMXLIB[tt] directory.",
 +    "A number of pre-formatted tables are presented in the [TT]GMXLIB[tt] dir,",
 +    "for 6-8, 6-9, 6-10, 6-11, 6-12 Lennard-Jones potentials with",
 +    "normal Coulomb.",
 +    "When pair interactions are present, a separate table for pair interaction",
 +    "functions is read using the [TT]-tablep[tt] option.[PAR]",
 +    "When tabulated bonded functions are present in the topology,",
 +    "interaction functions are read using the [TT]-tableb[tt] option.",
 +    "For each different tabulated interaction type the table file name is",
 +    "modified in a different way: before the file extension an underscore is",
 +    "appended, then a 'b' for bonds, an 'a' for angles or a 'd' for dihedrals",
 +    "and finally the table number of the interaction type.[PAR]",
 +    "The options [TT]-px[tt] and [TT]-pf[tt] are used for writing pull COM",
 +    "coordinates and forces when pulling is selected",
 +    "in the [TT].mdp[tt] file.[PAR]",
 +    "With [TT]-multi[tt] or [TT]-multidir[tt], multiple systems can be ",
 +    "simulated in parallel.",
 +    "As many input files/directories are required as the number of systems. ",
 +    "The [TT]-multidir[tt] option takes a list of directories (one for each ",
 +    "system) and runs in each of them, using the input/output file names, ",
 +    "such as specified by e.g. the [TT]-s[tt] option, relative to these ",
 +    "directories.",
 +    "With [TT]-multi[tt], the system number is appended to the run input ",
 +    "and each output filename, for instance [TT]topol.tpr[tt] becomes",
 +    "[TT]topol0.tpr[tt], [TT]topol1.tpr[tt] etc.",
 +    "The number of nodes per system is the total number of nodes",
 +    "divided by the number of systems.",
 +    "One use of this option is for NMR refinement: when distance",
 +    "or orientation restraints are present these can be ensemble averaged",
 +    "over all the systems.[PAR]",
 +    "With [TT]-replex[tt] replica exchange is attempted every given number",
 +    "of steps. The number of replicas is set with the [TT]-multi[tt] or ",
 +    "[TT]-multidir[tt] option, described above.",
 +    "All run input files should use a different coupling temperature,",
 +    "the order of the files is not important. The random seed is set with",
 +    "[TT]-reseed[tt]. The velocities are scaled and neighbor searching",
 +    "is performed after every exchange.[PAR]",
 +    "Finally some experimental algorithms can be tested when the",
 +    "appropriate options have been given. Currently under",
 +    "investigation are: polarizability and X-ray bombardments.",
 +    "[PAR]",
 +    "The option [TT]-membed[tt] does what used to be g_membed, i.e. embed",
 +    "a protein into a membrane. The data file should contain the options",
 +    "that where passed to g_membed before. The [TT]-mn[tt] and [TT]-mp[tt]",
 +    "both apply to this as well.",
 +    "[PAR]",
 +    "The option [TT]-pforce[tt] is useful when you suspect a simulation",
 +    "crashes due to too large forces. With this option coordinates and",
 +    "forces of atoms with a force larger than a certain value will",
 +    "be printed to stderr.",
 +    "[PAR]",
 +    "Checkpoints containing the complete state of the system are written",
 +    "at regular intervals (option [TT]-cpt[tt]) to the file [TT]-cpo[tt],",
 +    "unless option [TT]-cpt[tt] is set to -1.",
 +    "The previous checkpoint is backed up to [TT]state_prev.cpt[tt] to",
 +    "make sure that a recent state of the system is always available,",
 +    "even when the simulation is terminated while writing a checkpoint.",
 +    "With [TT]-cpnum[tt] all checkpoint files are kept and appended",
 +    "with the step number.",
 +    "A simulation can be continued by reading the full state from file",
 +    "with option [TT]-cpi[tt]. This option is intelligent in the way that",
 +    "if no checkpoint file is found, Gromacs just assumes a normal run and",
 +    "starts from the first step of the [TT].tpr[tt] file. By default the output",
 +    "will be appending to the existing output files. The checkpoint file",
 +    "contains checksums of all output files, such that you will never",
 +    "loose data when some output files are modified, corrupt or removed.",
 +    "There are three scenarios with [TT]-cpi[tt]:[PAR]",
 +    "[TT]*[tt] no files with matching names are present: new output files are written[PAR]",
 +    "[TT]*[tt] all files are present with names and checksums matching those stored",
 +    "in the checkpoint file: files are appended[PAR]",
 +    "[TT]*[tt] otherwise no files are modified and a fatal error is generated[PAR]",
 +    "With [TT]-noappend[tt] new output files are opened and the simulation",
 +    "part number is added to all output file names.",
 +    "Note that in all cases the checkpoint file itself is not renamed",
 +    "and will be overwritten, unless its name does not match",
 +    "the [TT]-cpo[tt] option.",
 +    "[PAR]",
 +    "With checkpointing the output is appended to previously written",
 +    "output files, unless [TT]-noappend[tt] is used or none of the previous",
 +    "output files are present (except for the checkpoint file).",
 +    "The integrity of the files to be appended is verified using checksums",
 +    "which are stored in the checkpoint file. This ensures that output can",
 +    "not be mixed up or corrupted due to file appending. When only some",
 +    "of the previous output files are present, a fatal error is generated",
 +    "and no old output files are modified and no new output files are opened.",
 +    "The result with appending will be the same as from a single run.",
 +    "The contents will be binary identical, unless you use a different number",
 +    "of nodes or dynamic load balancing or the FFT library uses optimizations",
 +    "through timing.",
 +    "[PAR]",
 +    "With option [TT]-maxh[tt] a simulation is terminated and a checkpoint",
 +    "file is written at the first neighbor search step where the run time",
 +    "exceeds [TT]-maxh[tt]*0.99 hours.",
 +    "[PAR]",
 +    "When [TT]mdrun[tt] receives a TERM signal, it will set nsteps to the current",
 +    "step plus one. When [TT]mdrun[tt] receives an INT signal (e.g. when ctrl+C is",
 +    "pressed), it will stop after the next neighbor search step ",
 +    "(with nstlist=0 at the next step).",
 +    "In both cases all the usual output will be written to file.",
 +    "When running with MPI, a signal to one of the [TT]mdrun[tt] processes",
 +    "is sufficient, this signal should not be sent to mpirun or",
 +    "the [TT]mdrun[tt] process that is the parent of the others.",
 +    "[PAR]",
 +    "When [TT]mdrun[tt] is started with MPI, it does not run niced by default."
 +#endif
 +  };
 +  t_commrec    *cr;
 +  t_filenm fnm[] = {
 +    { efTPX, NULL,      NULL,       ffREAD },
 +    { efTRN, "-o",      NULL,       ffWRITE },
 +    { efXTC, "-x",      NULL,       ffOPTWR },
 +    { efCPT, "-cpi",    NULL,       ffOPTRD },
 +    { efCPT, "-cpo",    NULL,       ffOPTWR },
 +    { efSTO, "-c",      "confout",  ffWRITE },
 +    { efEDR, "-e",      "ener",     ffWRITE },
 +    { efLOG, "-g",      "md",       ffWRITE },
 +    { efXVG, "-dhdl",   "dhdl",     ffOPTWR },
 +    { efXVG, "-field",  "field",    ffOPTWR },
 +    { efXVG, "-table",  "table",    ffOPTRD },
 +    { efXVG, "-tabletf", "tabletf",    ffOPTRD },
 +    { efXVG, "-tablep", "tablep",   ffOPTRD },
 +    { efXVG, "-tableb", "table",    ffOPTRD },
 +    { efTRX, "-rerun",  "rerun",    ffOPTRD },
 +    { efXVG, "-tpi",    "tpi",      ffOPTWR },
 +    { efXVG, "-tpid",   "tpidist",  ffOPTWR },
 +    { efEDI, "-ei",     "sam",      ffOPTRD },
++    { efXVG, "-eo",     "edsam",    ffOPTWR },
 +    { efGCT, "-j",      "wham",     ffOPTRD },
 +    { efGCT, "-jo",     "bam",      ffOPTWR },
 +    { efXVG, "-ffout",  "gct",      ffOPTWR },
 +    { efXVG, "-devout", "deviatie", ffOPTWR },
 +    { efXVG, "-runav",  "runaver",  ffOPTWR },
 +    { efXVG, "-px",     "pullx",    ffOPTWR },
 +    { efXVG, "-pf",     "pullf",    ffOPTWR },
 +    { efXVG, "-ro",     "rotation", ffOPTWR },
 +    { efLOG, "-ra",     "rotangles",ffOPTWR },
 +    { efLOG, "-rs",     "rotslabs", ffOPTWR },
 +    { efLOG, "-rt",     "rottorque",ffOPTWR },
 +    { efMTX, "-mtx",    "nm",       ffOPTWR },
 +    { efNDX, "-dn",     "dipole",   ffOPTWR },
 +    { efRND, "-multidir",NULL,      ffOPTRDMULT},
 +    { efDAT, "-membed", "membed",   ffOPTRD },
 +    { efTOP, "-mp",     "membed",   ffOPTRD },
 +    { efNDX, "-mn",     "membed",   ffOPTRD }
 +  };
 +#define NFILE asize(fnm)
 +
 +  /* Command line options ! */
 +  gmx_bool bCart        = FALSE;
 +  gmx_bool bPPPME       = FALSE;
 +  gmx_bool bPartDec     = FALSE;
 +  gmx_bool bDDBondCheck = TRUE;
 +  gmx_bool bDDBondComm  = TRUE;
 +  gmx_bool bTunePME     = TRUE;
 +  gmx_bool bTestVerlet  = FALSE;
 +  gmx_bool bVerbose     = FALSE;
 +  gmx_bool bCompact     = TRUE;
 +  gmx_bool bSepPot      = FALSE;
 +  gmx_bool bRerunVSite  = FALSE;
 +  gmx_bool bIonize      = FALSE;
 +  gmx_bool bConfout     = TRUE;
 +  gmx_bool bReproducible = FALSE;
 +    
 +  int  npme=-1;
 +  int  nmultisim=0;
 +  int  nstglobalcomm=-1;
 +  int  repl_ex_nst=0;
 +  int  repl_ex_seed=-1;
 +  int  repl_ex_nex=0;
 +  int  nstepout=100;
 +  int  resetstep=-1;
 +  int  nsteps=-2; /* the value -2 means that the mdp option will be used */
 +  
 +  rvec realddxyz={0,0,0};
 +  const char *ddno_opt[ddnoNR+1] =
 +    { NULL, "interleave", "pp_pme", "cartesian", NULL };
 +  const char *dddlb_opt[] =
 +    { NULL, "auto", "no", "yes", NULL };
 +  const char *nbpu_opt[] =
 +    { NULL, "auto", "cpu", "gpu", "gpu_cpu", NULL };
 +  real rdd=0.0,rconstr=0.0,dlb_scale=0.8,pforce=-1;
 +  char *ddcsx=NULL,*ddcsy=NULL,*ddcsz=NULL;
 +  real cpt_period=15.0,max_hours=-1;
 +  gmx_bool bAppendFiles=TRUE;
 +  gmx_bool bKeepAndNumCPT=FALSE;
 +  gmx_bool bResetCountersHalfWay=FALSE;
 +  output_env_t oenv=NULL;
 +  const char *deviceOptions = "";
 +
 +  gmx_hw_opt_t hw_opt={0,0,0,0,TRUE,FALSE,0,NULL};
 +
 +  t_pargs pa[] = {
 +
 +    { "-pd",      FALSE, etBOOL,{&bPartDec},
 +      "Use particle decompostion" },
 +    { "-dd",      FALSE, etRVEC,{&realddxyz},
 +      "Domain decomposition grid, 0 is optimize" },
 +    { "-ddorder", FALSE, etENUM, {ddno_opt},
 +      "DD node order" },
 +    { "-npme",    FALSE, etINT, {&npme},
 +      "Number of separate nodes to be used for PME, -1 is guess" },
 +    { "-nt",      FALSE, etINT, {&hw_opt.nthreads_tot},
 +      "Total number of threads to start (0 is guess)" },
 +    { "-ntmpi",   FALSE, etINT, {&hw_opt.nthreads_tmpi},
 +      "Number of thread-MPI threads to start (0 is guess)" },
 +    { "-ntomp",   FALSE, etINT, {&hw_opt.nthreads_omp},
 +      "Number of OpenMP threads per MPI process/thread to start (0 is guess)" },
 +    { "-ntomp_pme", FALSE, etINT, {&hw_opt.nthreads_omp_pme},
 +      "Number of OpenMP threads per MPI process/thread to start (0 is -ntomp)" },
 +    { "-pin",     FALSE, etBOOL, {&hw_opt.bThreadPinning},
 +      "Pin OpenMP threads to cores" },
 +    { "-pinht",   FALSE, etBOOL, {&hw_opt.bPinHyperthreading},
 +      "Always pin threads to Hyper-Threading cores" },
 +    { "-pinoffset", FALSE, etINT, {&hw_opt.core_pinning_offset},
 +      "Core offset for pinning (for running multiple mdrun processes on a single physical node)" },
 +    { "-gpu_id",  FALSE, etSTR, {&hw_opt.gpu_id},
 +      "List of GPU id's to use" },
 +    { "-ddcheck", FALSE, etBOOL, {&bDDBondCheck},
 +      "Check for all bonded interactions with DD" },
 +    { "-ddbondcomm", FALSE, etBOOL, {&bDDBondComm},
 +      "HIDDENUse special bonded atom communication when [TT]-rdd[tt] > cut-off" },
 +    { "-rdd",     FALSE, etREAL, {&rdd},
 +      "The maximum distance for bonded interactions with DD (nm), 0 is determine from initial coordinates" },
 +    { "-rcon",    FALSE, etREAL, {&rconstr},
 +      "Maximum distance for P-LINCS (nm), 0 is estimate" },
 +    { "-dlb",     FALSE, etENUM, {dddlb_opt},
 +      "Dynamic load balancing (with DD)" },
 +    { "-dds",     FALSE, etREAL, {&dlb_scale},
 +      "Minimum allowed dlb scaling of the DD cell size" },
 +    { "-ddcsx",   FALSE, etSTR, {&ddcsx},
 +      "HIDDENThe DD cell sizes in x" },
 +    { "-ddcsy",   FALSE, etSTR, {&ddcsy},
 +      "HIDDENThe DD cell sizes in y" },
 +    { "-ddcsz",   FALSE, etSTR, {&ddcsz},
 +      "HIDDENThe DD cell sizes in z" },
 +    { "-gcom",    FALSE, etINT,{&nstglobalcomm},
 +      "Global communication frequency" },
 +    { "-nb",      FALSE, etENUM, {&nbpu_opt},
 +      "Calculate non-bonded interactions on" },
 +    { "-tunepme", FALSE, etBOOL, {&bTunePME},  
 +      "Optimize PME load between PP/PME nodes or GPU/CPU" },
 +    { "-testverlet", FALSE, etBOOL, {&bTestVerlet},
 +      "Test the Verlet non-bonded scheme" },
 +    { "-v",       FALSE, etBOOL,{&bVerbose},  
 +      "Be loud and noisy" },
 +    { "-compact", FALSE, etBOOL,{&bCompact},  
 +      "Write a compact log file" },
 +    { "-seppot",  FALSE, etBOOL, {&bSepPot},
 +      "Write separate V and dVdl terms for each interaction type and node to the log file(s)" },
 +    { "-pforce",  FALSE, etREAL, {&pforce},
 +      "Print all forces larger than this (kJ/mol nm)" },
 +    { "-reprod",  FALSE, etBOOL,{&bReproducible},  
 +      "Try to avoid optimizations that affect binary reproducibility" },
 +    { "-cpt",     FALSE, etREAL, {&cpt_period},
 +      "Checkpoint interval (minutes)" },
 +    { "-cpnum",   FALSE, etBOOL, {&bKeepAndNumCPT},
 +      "Keep and number checkpoint files" },
 +    { "-append",  FALSE, etBOOL, {&bAppendFiles},
 +      "Append to previous output files when continuing from checkpoint instead of adding the simulation part number to all file names" },
 +    { "-nsteps",  FALSE, etINT, {&nsteps},
 +      "Run this number of steps, overrides .mdp file option" },
 +    { "-maxh",   FALSE, etREAL, {&max_hours},
 +      "Terminate after 0.99 times this time (hours)" },
 +    { "-multi",   FALSE, etINT,{&nmultisim}, 
 +      "Do multiple simulations in parallel" },
 +    { "-replex",  FALSE, etINT, {&repl_ex_nst}, 
 +      "Attempt replica exchange periodically with this period (steps)" },
 +    { "-nex",  FALSE, etINT, {&repl_ex_nex},
 +      "Number of random exchanges to carry out each exchange interval (N^3 is one suggestion).  -nex zero or not specified gives neighbor replica exchange." },
 +    { "-reseed",  FALSE, etINT, {&repl_ex_seed}, 
 +      "Seed for replica exchange, -1 is generate a seed" },
 +    { "-rerunvsite", FALSE, etBOOL, {&bRerunVSite},
 +      "HIDDENRecalculate virtual site coordinates with [TT]-rerun[tt]" },
 +    { "-ionize",  FALSE, etBOOL,{&bIonize},
 +      "Do a simulation including the effect of an X-Ray bombardment on your system" },
 +    { "-confout", FALSE, etBOOL, {&bConfout},
 +      "HIDDENWrite the last configuration with [TT]-c[tt] and force checkpointing at the last step" },
 +    { "-stepout", FALSE, etINT, {&nstepout},
 +      "HIDDENFrequency of writing the remaining runtime" },
 +    { "-resetstep", FALSE, etINT, {&resetstep},
 +      "HIDDENReset cycle counters after these many time steps" },
 +    { "-resethway", FALSE, etBOOL, {&bResetCountersHalfWay},
 +      "HIDDENReset the cycle counters after half the number of steps or halfway [TT]-maxh[tt]" }
 +#ifdef GMX_OPENMM
 +    ,
 +    { "-device",  FALSE, etSTR, {&deviceOptions},
 +      "Device option string" }
 +#endif
 +  };
 +  gmx_edsam_t  ed;
 +  unsigned long Flags, PCA_Flags;
 +  ivec     ddxyz;
 +  int      dd_node_order;
 +  gmx_bool     bAddPart;
 +  FILE     *fplog,*fptest;
 +  int      sim_part,sim_part_fn;
 +  const char *part_suffix=".part";
 +  char     suffix[STRLEN];
 +  int      rc;
 +  char **multidir=NULL;
 +
 +
 +  cr = init_par(&argc,&argv);
 +
 +  if (MASTER(cr))
 +    CopyRight(stderr, argv[0]);
 +
 +  PCA_Flags = (PCA_CAN_SET_DEFFNM | (MASTER(cr) ? 0 : PCA_QUIET));
 +  
 +  /* Comment this in to do fexist calls only on master
 +   * works not with rerun or tables at the moment
 +   * also comment out the version of init_forcerec in md.c 
 +   * with NULL instead of opt2fn
 +   */
 +  /*
 +     if (!MASTER(cr))
 +     {
 +     PCA_Flags |= PCA_NOT_READ_NODE;
 +     }
 +     */
 +
 +  parse_common_args(&argc,argv,PCA_Flags, NFILE,fnm,asize(pa),pa,
 +                    asize(desc),desc,0,NULL, &oenv);
 +
 +
 +
 +  /* we set these early because they might be used in init_multisystem() 
 +     Note that there is the potential for npme>nnodes until the number of
 +     threads is set later on, if there's thread parallelization. That shouldn't
 +     lead to problems. */ 
 +  dd_node_order = nenum(ddno_opt);
 +  cr->npmenodes = npme;
 +
 +  /* now check the -multi and -multidir option */
 +  if (opt2bSet("-multidir", NFILE, fnm))
 +  {
 +      int i;
 +      if (nmultisim > 0)
 +      {
 +          gmx_fatal(FARGS, "mdrun -multi and -multidir options are mutually exclusive.");
 +      }
 +      nmultisim = opt2fns(&multidir, "-multidir", NFILE, fnm);
 +  }
 +
 +
 +  if (repl_ex_nst != 0 && nmultisim < 2)
 +      gmx_fatal(FARGS,"Need at least two replicas for replica exchange (option -multi)");
 +
 +  if (repl_ex_nex < 0)
 +      gmx_fatal(FARGS,"Replica exchange number of exchanges needs to be positive");
 +
 +  if (nmultisim > 1) {
 +#ifndef GMX_THREAD_MPI
 +    gmx_bool bParFn = (multidir == NULL);
 +    init_multisystem(cr, nmultisim, multidir, NFILE, fnm, bParFn);
 +#else
 +    gmx_fatal(FARGS,"mdrun -multi is not supported with the thread library.Please compile GROMACS with MPI support");
 +#endif
 +  }
 +
 +  bAddPart = !bAppendFiles;
 +
 +  /* Check if there is ANY checkpoint file available */       
 +  sim_part    = 1;
 +  sim_part_fn = sim_part;
 +  if (opt2bSet("-cpi",NFILE,fnm))
 +  {
 +      if (bSepPot && bAppendFiles)
 +      {
 +          gmx_fatal(FARGS,"Output file appending is not supported with -seppot");
 +      }
 +
 +      bAppendFiles =
 +                read_checkpoint_simulation_part(opt2fn_master("-cpi", NFILE,
 +                                                              fnm,cr),
 +                                                &sim_part_fn,NULL,cr,
 +                                                bAppendFiles,NFILE,fnm,
 +                                                part_suffix,&bAddPart);
 +      if (sim_part_fn==0 && MASTER(cr))
 +      {
 +          fprintf(stdout,"No previous checkpoint file present, assuming this is a new run.\n");
 +      }
 +      else
 +      {
 +          sim_part = sim_part_fn + 1;
 +      }
 +
 +      if (MULTISIM(cr) && MASTER(cr))
 +      {
 +          check_multi_int(stdout,cr->ms,sim_part,"simulation part");
 +      }
 +  } 
 +  else
 +  {
 +      bAppendFiles = FALSE;
 +  }
 +
 +  if (!bAppendFiles)
 +  {
 +      sim_part_fn = sim_part;
 +  }
 +
 +  if (bAddPart)
 +  {
 +      /* Rename all output files (except checkpoint files) */
 +      /* create new part name first (zero-filled) */
 +      sprintf(suffix,"%s%04d",part_suffix,sim_part_fn);
 +
 +      add_suffix_to_output_names(fnm,NFILE,suffix);
 +      if (MASTER(cr))
 +      {
 +          fprintf(stdout,"Checkpoint file is from part %d, new output files will be suffixed '%s'.\n",sim_part-1,suffix);
 +      }
 +  }
 +
 +  Flags = opt2bSet("-rerun",NFILE,fnm) ? MD_RERUN : 0;
 +  Flags = Flags | (bSepPot       ? MD_SEPPOT       : 0);
 +  Flags = Flags | (bIonize       ? MD_IONIZE       : 0);
 +  Flags = Flags | (bPartDec      ? MD_PARTDEC      : 0);
 +  Flags = Flags | (bDDBondCheck  ? MD_DDBONDCHECK  : 0);
 +  Flags = Flags | (bDDBondComm   ? MD_DDBONDCOMM   : 0);
 +  Flags = Flags | (bTunePME      ? MD_TUNEPME      : 0);
 +  Flags = Flags | (bTestVerlet   ? MD_TESTVERLET   : 0);
 +  Flags = Flags | (bConfout      ? MD_CONFOUT      : 0);
 +  Flags = Flags | (bRerunVSite   ? MD_RERUN_VSITE  : 0);
 +  Flags = Flags | (bReproducible ? MD_REPRODUCIBLE : 0);
 +  Flags = Flags | (bAppendFiles  ? MD_APPENDFILES  : 0); 
 +  Flags = Flags | (opt2parg_bSet("-append", asize(pa),pa) ? MD_APPENDFILESSET : 0); 
 +  Flags = Flags | (bKeepAndNumCPT ? MD_KEEPANDNUMCPT : 0); 
 +  Flags = Flags | (sim_part>1    ? MD_STARTFROMCPT : 0); 
 +  Flags = Flags | (bResetCountersHalfWay ? MD_RESETCOUNTERSHALFWAY : 0);
 +
 +
 +  /* We postpone opening the log file if we are appending, so we can 
 +     first truncate the old log file and append to the correct position 
 +     there instead.  */
 +  if ((MASTER(cr) || bSepPot) && !bAppendFiles) 
 +  {
 +      gmx_log_open(ftp2fn(efLOG,NFILE,fnm),cr,
 +                   !bSepPot,Flags & MD_APPENDFILES,&fplog);
 +      CopyRight(fplog,argv[0]);
 +      please_cite(fplog,"Hess2008b");
 +      please_cite(fplog,"Spoel2005a");
 +      please_cite(fplog,"Lindahl2001a");
 +      please_cite(fplog,"Berendsen95a");
 +  }
 +  else if (!MASTER(cr) && bSepPot)
 +  {
 +      gmx_log_open(ftp2fn(efLOG,NFILE,fnm),cr,!bSepPot,Flags,&fplog);
 +  }
 +  else
 +  {
 +      fplog = NULL;
 +  }
 +
 +  ddxyz[XX] = (int)(realddxyz[XX] + 0.5);
 +  ddxyz[YY] = (int)(realddxyz[YY] + 0.5);
 +  ddxyz[ZZ] = (int)(realddxyz[ZZ] + 0.5);
 +
 +  rc = mdrunner(&hw_opt, fplog,cr,NFILE,fnm,oenv,bVerbose,bCompact,
 +                nstglobalcomm, ddxyz,dd_node_order,rdd,rconstr,
 +                dddlb_opt[0],dlb_scale,ddcsx,ddcsy,ddcsz,
 +                nbpu_opt[0],
 +                nsteps,nstepout,resetstep,
 +                nmultisim,repl_ex_nst,repl_ex_nex,repl_ex_seed,
 +                pforce, cpt_period,max_hours,deviceOptions,Flags);
 +
 +  gmx_finalize_par();
 +
 +  if (MULTIMASTER(cr)) {
 +      thanx(stderr);
 +  }
 +
 +  /* Log file has to be closed in mdrunner if we are appending to it 
 +     (fplog not set here) */
 +  if (MASTER(cr) && !bAppendFiles) 
 +  {
 +      gmx_log_close(fplog);
 +  }
 +
 +  return rc;
 +}
 +
index ab5e4bdaf566a0fce2713c1f6720b0b1b7022870,0000000000000000000000000000000000000000..fbde22b6e9ac4819f6cd88d392362d13539692ad
mode 100644,000000..100644
--- /dev/null
@@@ -1,1395 -1,0 +1,1427 @@@
-     int *pind = re->destinations;
 +/* -*- mode: c; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4; c-file-style: "stroustrup"; -*-
 + *
 + * 
 + *                This source code is part of
 + * 
 + *                 G   R   O   M   A   C   S
 + * 
 + *          GROningen MAchine for Chemical Simulations
 + * 
 + *                        VERSION 3.2.0
 + * Written by David van der Spoel, Erik Lindahl, Berk Hess, and others.
 + * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 + * Copyright (c) 2001-2004, The GROMACS development team,
 + * check out http://www.gromacs.org for more information.
 +
 + * This program is free software; you can redistribute it and/or
 + * modify it under the terms of the GNU General Public License
 + * as published by the Free Software Foundation; either version 2
 + * of the License, or (at your option) any later version.
 + * 
 + * If you want to redistribute modifications, please consider that
 + * scientific software is very special. Version control is crucial -
 + * bugs must be traceable. We will be happy to consider code for
 + * inclusion in the official distribution, but derived work must not
 + * be called official GROMACS. Details are found in the README & COPYING
 + * files - if they are missing, get the official version at www.gromacs.org.
 + * 
 + * To help us fund GROMACS development, we humbly ask that you cite
 + * the papers on the package - you can find them in the top README file.
 + * 
 + * For more info, check our website at http://www.gromacs.org
 + * 
 + * And Hey:
 + * Gallium Rubidium Oxygen Manganese Argon Carbon Silicon
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <math.h>
 +#include "repl_ex.h"
 +#include "network.h"
 +#include "random.h"
 +#include "smalloc.h"
 +#include "physics.h"
 +#include "copyrite.h"
 +#include "macros.h"
 +#include "vec.h"
 +#include "names.h"
 +#include "mvdata.h"
 +#include "domdec.h"
 +#include "partdec.h"
 +
 +#define PROBABILITYCUTOFF 100
 +/* we don't bother evaluating if events are more rare than exp(-100) = 3.7x10^-44 */
 +
 +enum { ereTEMP, ereLAMBDA, ereENDSINGLE ,ereTL, ereNR };
 +const char *erename[ereNR] = { "temperature", "lambda", "end_single_marker", "temperature and lambda"};
 +/* end_single_marker merely notes the end of single variable replica exchange. All types higher than
 +   it are multiple replica exchange methods */
 +/* Eventually, should add 'pressure', 'temperature and pressure', 'lambda_and_pressure', 'temperature_lambda_pressure'?;
 +   Let's wait until we feel better about the pressure control methods giving exact ensembles.  Right now, we assume constant pressure  */
 +
 +typedef struct gmx_repl_ex
 +{
 +    int  repl;
 +    int  nrepl;
 +    real temp;
 +    int  type;
 +    real **q;
 +    gmx_bool bNPT;
 +    real *pres;
 +    int  *ind;
 +    int *allswaps;
 +    int  nst;
 +    int nex;
 +    int  seed;
 +    int  nattempt[2];
 +    real *prob_sum;
 +    int  **nmoves;
 +    int  *nexchange;
 +
 +    /* these are helper arrays for replica exchange; allocated here so they
 +       don't have to be allocated each time */
 +    int *destinations;
 +    int **cyclic;
 +    int **order;
 +    int *tmpswap;
 +    gmx_bool *incycle;
 +    gmx_bool *bEx;
 +
 +    /* helper arrays to hold the quantities that are exchanged */
 +    real *prob;
 +    real *Epot;
 +    real *beta;
 +    real *Vol;
 +    real **de;
 +
 +} t_gmx_repl_ex;
 +
 +static gmx_bool repl_quantity(FILE *fplog,const gmx_multisim_t *ms,
 +                              struct gmx_repl_ex *re,int ere,real q)
 +{
 +    real *qall;
 +    gmx_bool bDiff;
 +    int  i,s;
 +
 +    snew(qall,ms->nsim);
 +    qall[re->repl] = q;
 +    gmx_sum_sim(ms->nsim,qall,ms);
 +
 +    bDiff = FALSE;
 +    for (s=1; s<ms->nsim; s++)
 +    {
 +        if (qall[s] != qall[0])
 +        {
 +            bDiff = TRUE;   
 +        }
 +    }
 +
 +    if (bDiff)
 +    {
 +        /* Set the replica exchange type and quantities */
 +        re->type = ere;
 +
 +        snew(re->q[ere],re->nrepl);
 +        for(s=0; s<ms->nsim; s++)
 +        {
 +            re->q[ere][s] = qall[s];
 +        }
 +    }
 +    sfree(qall);
 +    return bDiff;
 +}
 +
 +gmx_repl_ex_t init_replica_exchange(FILE *fplog,
 +                                    const gmx_multisim_t *ms,
 +                                    const t_state *state,
 +                                    const t_inputrec *ir,
 +                                    int nst, int nex, int init_seed)
 +{
 +    real temp,pres;
 +    int  i,j,k;
 +    struct gmx_repl_ex *re;
 +    gmx_bool bTemp;
 +    gmx_bool bLambda=FALSE;
 +
 +    fprintf(fplog,"\nInitializing Replica Exchange\n");
 +
 +    if (ms == NULL || ms->nsim == 1)
 +    {
 +        gmx_fatal(FARGS,"Nothing to exchange with only one replica, maybe you forgot to set the -multi option of mdrun?");
 +    }
 +
 +    snew(re,1);
 +
 +    re->repl     = ms->sim;
 +    re->nrepl    = ms->nsim;
 +    snew(re->q,ereENDSINGLE);
 +
 +    fprintf(fplog,"Repl  There are %d replicas:\n",re->nrepl);
 +
 +    check_multi_int(fplog,ms,state->natoms,"the number of atoms");
 +    check_multi_int(fplog,ms,ir->eI,"the integrator");
 +    check_multi_large_int(fplog,ms,ir->init_step+ir->nsteps,"init_step+nsteps");
 +    check_multi_large_int(fplog,ms,(ir->init_step+nst-1)/nst,
 +                          "first exchange step: init_step/-replex");
 +    check_multi_int(fplog,ms,ir->etc,"the temperature coupling");
 +    check_multi_int(fplog,ms,ir->opts.ngtc,
 +                    "the number of temperature coupling groups");
 +    check_multi_int(fplog,ms,ir->epc,"the pressure coupling");
 +    check_multi_int(fplog,ms,ir->efep,"free energy");
 +    check_multi_int(fplog,ms,ir->fepvals->n_lambda,"number of lambda states");
 +
 +    re->temp = ir->opts.ref_t[0];
 +    for(i=1; (i<ir->opts.ngtc); i++)
 +    {
 +        if (ir->opts.ref_t[i] != re->temp)
 +        {
 +            fprintf(fplog,"\nWARNING: The temperatures of the different temperature coupling groups are not identical\n\n");
 +            fprintf(stderr,"\nWARNING: The temperatures of the different temperature coupling groups are not identical\n\n");
 +        }
 +    }
 +
 +    re->type = -1;
 +    bTemp = repl_quantity(fplog,ms,re,ereTEMP,re->temp);
 +    if (ir->efep != efepNO)
 +    {
 +        bLambda = repl_quantity(fplog,ms,re,ereLAMBDA,(real)ir->fepvals->init_fep_state);
 +    }
 +    if (re->type == -1)  /* nothing was assigned */
 +    {
 +        gmx_fatal(FARGS,"The properties of the %d systems are all the same, there is nothing to exchange",re->nrepl);
 +    }
 +    if (bLambda && bTemp) {
 +        re->type = ereTL;
 +    }
 +
 +    if (bTemp)
 +    {
 +        please_cite(fplog,"Sugita1999a");
 +        if (ir->epc != epcNO)
 +        {
 +            re->bNPT = TRUE;
 +            fprintf(fplog,"Repl  Using Constant Pressure REMD.\n");
 +            please_cite(fplog,"Okabe2001a");
 +        }
 +        if (ir->etc == etcBERENDSEN)
 +        {
 +            gmx_fatal(FARGS,"REMD with the %s thermostat does not produce correct potential energy distributions, consider using the %s thermostat instead",
 +                      ETCOUPLTYPE(ir->etc),ETCOUPLTYPE(etcVRESCALE));
 +        }
 +    }
 +    if (bLambda) {
 +        if (ir->fepvals->delta_lambda != 0)   /* check this? */
 +        {
 +            gmx_fatal(FARGS,"delta_lambda is not zero");
 +        }
 +    }
 +    if (re->bNPT)
 +    {
 +        snew(re->pres,re->nrepl);
 +        if (ir->epct == epctSURFACETENSION)
 +        {
 +            pres = ir->ref_p[ZZ][ZZ];
 +        }
 +        else
 +        {
 +            pres = 0;
 +            j = 0;
 +            for(i=0; i<DIM; i++)
 +            {
 +                if (ir->compress[i][i] != 0)
 +                {
 +                    pres += ir->ref_p[i][i];
 +                    j++;
 +                }
 +            }
 +            pres /= j;
 +        }
 +        re->pres[re->repl] = pres;
 +        gmx_sum_sim(re->nrepl,re->pres,ms);
 +    }
 +
 +    /* Make an index for increasing replica order */
 +    /* only makes sense if one or the other is varying, not both!
 +       if both are varying, we trust the order the person gave. */
 +    snew(re->ind,re->nrepl);
 +    for(i=0; i<re->nrepl; i++)
 +    {
 +        re->ind[i] = i;
 +    }
 +
 +    if (re->type<ereENDSINGLE) {
 +
 +        for(i=0; i<re->nrepl; i++)
 +        {
 +            for(j=i+1; j<re->nrepl; j++)
 +            {
 +                if (re->q[re->type][re->ind[j]] < re->q[re->type][re->ind[i]])
 +                {
 +                    k = re->ind[i];
 +                    re->ind[i] = re->ind[j];
 +                    re->ind[j] = k;
 +                }
 +                else if (re->q[re->type][re->ind[j]] == re->q[re->type][re->ind[i]])
 +                {
 +                    gmx_fatal(FARGS,"Two replicas have identical %ss",erename[re->type]);
 +                }
 +            }
 +        }
 +    }
 +
 +    /* keep track of all the swaps, starting with the initial placement. */
 +    snew(re->allswaps,re->nrepl);
 +    for(i=0; i<re->nrepl; i++)
 +    {
 +        re->allswaps[i] = re->ind[i];
 +    }
 +
 +    switch (re->type)
 +    {
 +    case ereTEMP:
 +        fprintf(fplog,"\nReplica exchange in temperature\n");
 +        for(i=0; i<re->nrepl; i++)
 +        {
 +            fprintf(fplog," %5.1f",re->q[re->type][re->ind[i]]);
 +        }
 +        fprintf(fplog,"\n");
 +        break;
 +    case ereLAMBDA:
 +        fprintf(fplog,"\nReplica exchange in lambda\n");
 +        for(i=0; i<re->nrepl; i++)
 +        {
 +            fprintf(fplog," %3d",(int)re->q[re->type][re->ind[i]]);
 +        }
 +        fprintf(fplog,"\n");
 +        break;
 +    case ereTL:
 +        fprintf(fplog,"\nReplica exchange in temperature and lambda state\n");
 +        for(i=0; i<re->nrepl; i++)
 +        {
 +            fprintf(fplog," %5.1f",re->q[ereTEMP][re->ind[i]]);
 +        }
 +        fprintf(fplog,"\n");
 +        for(i=0; i<re->nrepl; i++)
 +        {
 +            fprintf(fplog," %5d",(int)re->q[ereLAMBDA][re->ind[i]]);
 +        }
 +        fprintf(fplog,"\n");
 +        break;
 +    default:
 +        gmx_incons("Unknown replica exchange quantity");
 +    }
 +    if (re->bNPT)
 +    {
 +        fprintf(fplog,"\nRepl  p");
 +        for(i=0; i<re->nrepl; i++)
 +        {
 +            fprintf(fplog," %5.2f",re->pres[re->ind[i]]);
 +        }
 +
 +        for(i=0; i<re->nrepl; i++)
 +        {
 +            if ((i > 0) && (re->pres[re->ind[i]] < re->pres[re->ind[i-1]]))
 +            {
 +                fprintf(fplog,"\nWARNING: The reference pressures decrease with increasing temperatures\n\n");
 +                fprintf(stderr,"\nWARNING: The reference pressures decrease with increasing temperatures\n\n");
 +            }
 +        }
 +    }
 +    re->nst = nst;
 +    if (init_seed == -1)
 +    {
 +        if (MASTERSIM(ms))
 +        {
 +            re->seed = make_seed();
 +        }
 +        else
 +        {
 +            re->seed = 0;
 +        }
 +        gmx_sumi_sim(1,&(re->seed),ms);
 +    }
 +    else
 +    {
 +        re->seed = init_seed;
 +    }
 +    fprintf(fplog,"\nReplica exchange interval: %d\n",re->nst);
 +    fprintf(fplog,"\nReplica random seed: %d\n",re->seed);
 +
 +    re->nattempt[0] = 0;
 +    re->nattempt[1] = 0;
 +
 +    snew(re->prob_sum,re->nrepl);
 +    snew(re->nexchange,re->nrepl);
 +    snew(re->nmoves,re->nrepl);
 +    for (i=0;i<re->nrepl;i++) 
 +    {
 +        snew(re->nmoves[i],re->nrepl);
 +    }
 +    fprintf(fplog,"Replica exchange information below: x=exchange, pr=probability\n");
 +
 +    /* generate space for the helper functions so we don't have to snew each time */
 +
 +    snew(re->destinations,re->nrepl);
 +    snew(re->incycle,re->nrepl);
 +    snew(re->tmpswap,re->nrepl);
 +    snew(re->cyclic,re->nrepl);
 +    snew(re->order,re->nrepl);
 +    for (i=0;i<re->nrepl;i++)
 +    {
 +        snew(re->cyclic[i],re->nrepl);
 +        snew(re->order[i],re->nrepl);
 +    }
 +    /* allocate space for the functions storing the data for the replicas */
 +    /* not all of these arrays needed in all cases, but they don't take
 +       up much space, since the max size is nrepl**2 */
 +    snew(re->prob,re->nrepl);
 +    snew(re->bEx,re->nrepl);
 +    snew(re->beta,re->nrepl);
 +    snew(re->Vol,re->nrepl);
 +    snew(re->Epot,re->nrepl);
 +    snew(re->de,re->nrepl);
 +    for (i=0;i<re->nrepl;i++)
 +    {
 +        snew(re->de[i],re->nrepl);
 +    }
 +    re->nex = nex;
 +    return re;
 +}
 +
 +static void exchange_reals(const gmx_multisim_t *ms,int b,real *v,int n)
 +{
 +    real *buf;
 +    int  i;
 +
 +    if (v)
 +    {
 +        snew(buf,n);
 +#ifdef GMX_MPI
 +        /*
 +          MPI_Sendrecv(v,  n*sizeof(real),MPI_BYTE,MSRANK(ms,b),0,
 +          buf,n*sizeof(real),MPI_BYTE,MSRANK(ms,b),0,
 +          ms->mpi_comm_masters,MPI_STATUS_IGNORE);
 +        */
 +        {
 +            MPI_Request mpi_req;
 +
 +            MPI_Isend(v,n*sizeof(real),MPI_BYTE,MSRANK(ms,b),0,
 +                      ms->mpi_comm_masters,&mpi_req);
 +            MPI_Recv(buf,n*sizeof(real),MPI_BYTE,MSRANK(ms,b),0,
 +                     ms->mpi_comm_masters,MPI_STATUS_IGNORE);
 +            MPI_Wait(&mpi_req,MPI_STATUS_IGNORE);
 +        }
 +#endif
 +        for(i=0; i<n; i++)
 +        {
 +            v[i] = buf[i];
 +        }
 +        sfree(buf);
 +    }
 +}
 +
 +
 +static void exchange_ints(const gmx_multisim_t *ms,int b,int *v,int n)
 +{
 +  int *buf;
 +  int  i;
 +
 +  if (v) {
 +    snew(buf,n);
 +#ifdef GMX_MPI
 +    /*
 +    MPI_Sendrecv(v,  n*sizeof(int),MPI_BYTE,MSRANK(ms,b),0,
 +               buf,n*sizeof(int),MPI_BYTE,MSRANK(ms,b),0,
 +               ms->mpi_comm_masters,MPI_STATUS_IGNORE);
 +    */
 +    {
 +      MPI_Request mpi_req;
 +
 +      MPI_Isend(v,n*sizeof(int),MPI_BYTE,MSRANK(ms,b),0,
 +              ms->mpi_comm_masters,&mpi_req);
 +      MPI_Recv(buf,n*sizeof(int),MPI_BYTE,MSRANK(ms,b),0,
 +             ms->mpi_comm_masters,MPI_STATUS_IGNORE);
 +      MPI_Wait(&mpi_req,MPI_STATUS_IGNORE);
 +    }
 +#endif
 +    for(i=0; i<n; i++) 
 +    {
 +        v[i] = buf[i];
 +    }
 +    sfree(buf);
 +  }
 +}
 +
 +static void exchange_doubles(const gmx_multisim_t *ms,int b,double *v,int n)
 +{
 +    double *buf;
 +    int  i;
 +
 +    if (v)
 +    {
 +        snew(buf,n);
 +#ifdef GMX_MPI
 +        /*
 +          MPI_Sendrecv(v,  n*sizeof(double),MPI_BYTE,MSRANK(ms,b),0,
 +          buf,n*sizeof(double),MPI_BYTE,MSRANK(ms,b),0,
 +          ms->mpi_comm_masters,MPI_STATUS_IGNORE);
 +        */
 +        {
 +            MPI_Request mpi_req;
 +
 +            MPI_Isend(v,n*sizeof(double),MPI_BYTE,MSRANK(ms,b),0,
 +                      ms->mpi_comm_masters,&mpi_req);
 +            MPI_Recv(buf,n*sizeof(double),MPI_BYTE,MSRANK(ms,b),0,
 +                     ms->mpi_comm_masters,MPI_STATUS_IGNORE);
 +            MPI_Wait(&mpi_req,MPI_STATUS_IGNORE);
 +        }
 +#endif
 +        for(i=0; i<n; i++)
 +        {
 +            v[i] = buf[i];
 +        }
 +        sfree(buf);
 +    }
 +}
 +
 +static void exchange_rvecs(const gmx_multisim_t *ms,int b,rvec *v,int n)
 +{
 +    rvec *buf;
 +    int  i;
 +  
 +    if (v)
 +    {
 +        snew(buf,n);
 +#ifdef GMX_MPI
 +        /*
 +          MPI_Sendrecv(v[0],  n*sizeof(rvec),MPI_BYTE,MSRANK(ms,b),0,
 +          buf[0],n*sizeof(rvec),MPI_BYTE,MSRANK(ms,b),0,
 +          ms->mpi_comm_masters,MPI_STATUS_IGNORE);
 +        */
 +        {
 +            MPI_Request mpi_req;
 +
 +            MPI_Isend(v[0],n*sizeof(rvec),MPI_BYTE,MSRANK(ms,b),0,
 +                      ms->mpi_comm_masters,&mpi_req);
 +            MPI_Recv(buf[0],n*sizeof(rvec),MPI_BYTE,MSRANK(ms,b),0,
 +                     ms->mpi_comm_masters,MPI_STATUS_IGNORE);
 +            MPI_Wait(&mpi_req,MPI_STATUS_IGNORE);
 +        }
 +#endif
 +        for(i=0; i<n; i++)
 +        {
 +            copy_rvec(buf[i],v[i]);
 +        }
 +        sfree(buf);
 +    }
 +}
 +
 +static void exchange_state(const gmx_multisim_t *ms,int b,t_state *state)
 +{
 +    /* When t_state changes, this code should be updated. */
 +    int ngtc,nnhpres;
 +    ngtc = state->ngtc * state->nhchainlength;
 +    nnhpres = state->nnhpres* state->nhchainlength;
 +    exchange_rvecs(ms,b,state->box,DIM);
 +    exchange_rvecs(ms,b,state->box_rel,DIM);
 +    exchange_rvecs(ms,b,state->boxv,DIM);
 +    exchange_reals(ms,b,&(state->veta),1);
 +    exchange_reals(ms,b,&(state->vol0),1);
 +    exchange_rvecs(ms,b,state->svir_prev,DIM);
 +    exchange_rvecs(ms,b,state->fvir_prev,DIM);
 +    exchange_rvecs(ms,b,state->pres_prev,DIM);
 +    exchange_doubles(ms,b,state->nosehoover_xi,ngtc);
 +    exchange_doubles(ms,b,state->nosehoover_vxi,ngtc);
 +    exchange_doubles(ms,b,state->nhpres_xi,nnhpres);
 +    exchange_doubles(ms,b,state->nhpres_vxi,nnhpres);
 +    exchange_doubles(ms,b,state->therm_integral,state->ngtc);
 +    exchange_rvecs(ms,b,state->x,state->natoms);
 +    exchange_rvecs(ms,b,state->v,state->natoms);
 +    exchange_rvecs(ms,b,state->sd_X,state->natoms);
 +}
 +
 +static void copy_rvecs(rvec *s,rvec *d,int n)
 +{
 +    int i;
 +
 +    if (d != NULL)
 +    {
 +        for(i=0; i<n; i++)
 +        {
 +            copy_rvec(s[i],d[i]);
 +        }
 +    }
 +}
 +
 +static void copy_doubles(const double *s,double *d,int n)
 +{
 +    int i;
 +
 +    if (d != NULL)
 +    {
 +        for(i=0; i<n; i++)
 +        {
 +            d[i] = s[i];
 +        }
 +    }
 +}
 +
 +static void copy_reals(const real *s,real *d,int n)
 +{
 +    int i;
 +
 +    if (d != NULL)
 +    {
 +        for(i=0; i<n; i++)
 +        {
 +            d[i] = s[i];
 +        }
 +    }
 +}
 +
 +static void copy_ints(const int *s,int *d,int n)
 +{
 +    int i;
 +
 +    if (d != NULL)
 +    {
 +        for(i=0; i<n; i++)
 +        {
 +            d[i] = s[i];
 +        }
 +    }
 +}
 +
 +#define scopy_rvecs(v,n)   copy_rvecs(state->v,state_local->v,n);
 +#define scopy_doubles(v,n) copy_doubles(state->v,state_local->v,n);
 +#define scopy_reals(v,n) copy_reals(state->v,state_local->v,n);
 +#define scopy_ints(v,n)   copy_ints(state->v,state_local->v,n);
 +
 +static void copy_state_nonatomdata(t_state *state,t_state *state_local)
 +{
 +    /* When t_state changes, this code should be updated. */
 +    int ngtc,nnhpres;
 +    ngtc = state->ngtc * state->nhchainlength;
 +    nnhpres = state->nnhpres* state->nhchainlength;
 +    scopy_rvecs(box,DIM);
 +    scopy_rvecs(box_rel,DIM);
 +    scopy_rvecs(boxv,DIM);
 +    state_local->veta = state->veta;
 +    state_local->vol0 = state->vol0;
 +    scopy_rvecs(svir_prev,DIM);
 +    scopy_rvecs(fvir_prev,DIM);
 +    scopy_rvecs(pres_prev,DIM);
 +    scopy_doubles(nosehoover_xi,ngtc);
 +    scopy_doubles(nosehoover_vxi,ngtc);
 +    scopy_doubles(nhpres_xi,nnhpres);
 +    scopy_doubles(nhpres_vxi,nnhpres);
 +    scopy_doubles(therm_integral,state->ngtc);
 +    scopy_rvecs(x,state->natoms);
 +    scopy_rvecs(v,state->natoms);
 +    scopy_rvecs(sd_X,state->natoms);
 +    copy_ints(&(state->fep_state),&(state_local->fep_state),1);
 +    scopy_reals(lambda,efptNR);
 +}
 +
 +static void scale_velocities(t_state *state,real fac)
 +{
 +    int i;
 +
 +    if (state->v)
 +    {
 +        for(i=0; i<state->natoms; i++)
 +        {
 +            svmul(fac,state->v[i],state->v[i]);
 +        }
 +    }
 +}
 +
 +static void pd_collect_state(const t_commrec *cr,t_state *state)
 +{
 +    int shift;
 +  
 +    if (debug)
 +    {
 +        fprintf(debug,"Collecting state before exchange\n");
 +    }
 +    shift = cr->nnodes - cr->npmenodes - 1;
 +    move_rvecs(cr,FALSE,FALSE,GMX_LEFT,GMX_RIGHT,state->x,NULL,shift,NULL);
 +    if (state->v)
 +    {
 +        move_rvecs(cr,FALSE,FALSE,GMX_LEFT,GMX_RIGHT,state->v,NULL,shift,NULL);
 +    }
 +    if (state->sd_X)
 +    {
 +        move_rvecs(cr,FALSE,FALSE,GMX_LEFT,GMX_RIGHT,state->sd_X,NULL,shift,NULL);
 +    }
 +}
 +
 +static void print_transition_matrix(FILE *fplog,const char *leg,int n,int **nmoves, int *nattempt)
 +{
 +    int i,j,ntot;
 +    float Tprint;
 +
 +    ntot = nattempt[0] + nattempt[1];
 +    fprintf(fplog,"\n");
 +    fprintf(fplog,"Repl");
 +    for (i=0;i<n;i++)
 +    {
 +        fprintf(fplog,"    ");  /* put the title closer to the center */
 +    }
 +    fprintf(fplog,"Empirical Transition Matrix\n");
 +
 +    fprintf(fplog,"Repl");
 +    for (i=0;i<n;i++)
 +    {
 +        fprintf(fplog,"%8d",(i+1));
 +    }
 +    fprintf(fplog,"\n");
 +
 +    for (i=0;i<n;i++)
 +    {
 +        fprintf(fplog,"Repl");
 +        for (j=0;j<n;j++)
 +        {
 +            Tprint = 0.0;
 +            if (nmoves[i][j] > 0)
 +            {
 +                Tprint = nmoves[i][j]/(2.0*ntot);
 +            }
 +            fprintf(fplog,"%8.4f",Tprint);
 +        }
 +        fprintf(fplog,"%3d\n",i);
 +    }
 +}
 +
 +static void print_ind(FILE *fplog,const char *leg,int n,int *ind,gmx_bool *bEx)
 +{
 +    int i;
 +
 +    fprintf(fplog,"Repl %2s %2d",leg,ind[0]);
 +    for(i=1; i<n; i++)
 +    {
 +        fprintf(fplog," %c %2d",(bEx!=0 && bEx[i]) ? 'x' : ' ',ind[i]);
 +    }
 +    fprintf(fplog,"\n");
 +}
 +
 +static void print_allswitchind(FILE *fplog,int n,int *ind,int *pind, int *allswaps, int *tmpswap)
 +{
 +    int i;
 +
 +    for (i=0;i<n;i++)
 +    {
 +        tmpswap[i] = allswaps[i];
 +    }
 +    for (i=0;i<n;i++)
 +    {
 +        allswaps[i] = tmpswap[pind[i]];
 +    }
 +
 +    fprintf(fplog,"\nAccepted Exchanges:   ");
 +    for (i=0;i<n;i++)
 +    {
 +        fprintf(fplog,"%d ",pind[i]);
 +    }
 +    fprintf(fplog,"\n");
 +
++    /* the "Order After Exchange" is the state label corresponding to the configuration that
++       started in state listed in order, i.e.
++
++       3 0 1 2
++
++       means that the:
++       configuration starting in simulation 3 is now in simulation 0,
++       configuration starting in simulation 0 is now in simulation 1,
++       configuration starting in simulation 1 is now in simulation 2,
++       configuration starting in simulation 2 is now in simulation 3
++     */
 +    fprintf(fplog,"Order After Exchange: ");
 +    for (i=0;i<n;i++)
 +    {
 +        fprintf(fplog,"%d ",allswaps[i]);
 +    }
 +    fprintf(fplog,"\n\n");
 +}
 +
 +static void print_prob(FILE *fplog,const char *leg,int n,real *prob)
 +{
 +    int  i;
 +    char buf[8];
 +  
 +    fprintf(fplog,"Repl %2s ",leg);
 +    for(i=1; i<n; i++)
 +    {
 +        if (prob[i] >= 0)
 +        {
 +            sprintf(buf,"%4.2f",prob[i]);
 +            fprintf(fplog,"  %3s",buf[0]=='1' ? "1.0" : buf+1);
 +        }
 +        else
 +        {
 +            fprintf(fplog,"     ");
 +        }
 +    }
 +    fprintf(fplog,"\n");
 +}
 +
 +static void print_count(FILE *fplog,const char *leg,int n,int *count)
 +{
 +    int i;
 +
 +    fprintf(fplog,"Repl %2s ",leg);
 +    for(i=1; i<n; i++)
 +    {
 +        fprintf(fplog," %4d",count[i]);
 +    }
 +    fprintf(fplog,"\n");
 +}
 +
 +static real calc_delta(FILE *fplog, gmx_bool bPrint, struct gmx_repl_ex *re, int a, int b, int ap, int bp) {
 +
 +    real ediff,dpV,delta=0;
 +    real *Epot = re->Epot;
 +    real *Vol = re->Vol;
 +    real **de = re->de;
 +    real *beta = re->beta;
 +
 +    /* Two cases; we are permuted and not.  In all cases, setting ap = a and bp = b will reduce
 +       to the non permuted case */
 +
 +    switch (re->type)
 +    {
 +    case ereTEMP:
 +        /*
 +         * Okabe et. al. Chem. Phys. Lett. 335 (2001) 435-439
 +         */
 +        ediff = Epot[b] - Epot[a];
 +        delta = -(beta[bp] - beta[ap])*ediff;
 +        break;
 +    case ereLAMBDA:
 +        /* two cases:  when we are permuted, and not.  */
 +        /* non-permuted:
 +           ediff =  E_new - E_old
 +                 =  [H_b(x_a) + H_a(x_b)] - [H_b(x_b) + H_a(x_a)]
 +                 =  [H_b(x_a) - H_a(x_a)] + [H_a(x_b) - H_b(x_b)]
 +                 =  de[b][a] + de[a][b] */
++
 +        /* permuted:
 +           ediff =  E_new - E_old
 +                 =  [H_bp(x_a) + H_ap(x_b)] - [H_bp(x_b) + H_ap(x_a)]
 +                 =  [H_bp(x_a) - H_ap(x_a)] + [H_ap(x_b) - H_bp(x_b)]
 +                 =  [H_bp(x_a) - H_a(x_a) + H_a(x_a) - H_ap(x_a)] + [H_ap(x_b) - H_b(x_b) + H_b(x_b) - H_bp(x_b)]
 +                 =  [H_bp(x_a) - H_a(x_a)] - [H_ap(x_a) - H_a(x_a)] + [H_ap(x_b) - H_b(x_b)] - [H_bp(x_b) - H_b(x_b)]
 +                 =  (de[bp][a] - de[ap][a]) + (de[ap][b] - de[bp][b])    */
++        /* but, in the current code implementation, we flip configurations, not indices . . .
++           So let's examine that.
++                 =  [H_b(x_ap) - H_a(x_a)] - [H_a(x_ap) - H_a(x_a)] + [H_a(x_bp) - H_b(x_b)] - [H_b(x_bp) - H_b(x_b)]
++                 =  [H_b(x_ap) - H_a(x_ap)]  + [H_a(x_bp) - H_b(x_bp)]
++                 = (de[b][ap] - de[a][ap]) + (de[a][bp] - de[b][bp])
++                 So, if we exchange b<=> bp and a<=> ap, we return to the same result.
++                 So the simple solution is to flip the
++                 position of perturbed and original indices in the tests.
++        */
++
 +        ediff = (de[bp][a] - de[ap][a]) + (de[ap][b] - de[bp][b]);
 +        delta = ediff*beta[a]; /* assume all same temperature in this case */
 +        break;
 +    case ereTL:
 +        /* not permuted:  */
 +        /* delta =  reduced E_new - reduced E_old
 +                 =  [beta_b H_b(x_a) + beta_a H_a(x_b)] - [beta_b H_b(x_b) + beta_a H_a(x_a)]
 +                 =  [beta_b H_b(x_a) - beta_a H_a(x_a)] + [beta_a H_a(x_b) - beta_b H_b(x_b)]
 +                 =  [beta_b dH_b(x_a) + beta_b H_a(x_a) - beta_a H_a(x_a)] +
 +                    [beta_a dH_a(x_b) + beta_a H_b(x_b) - beta_b H_b(x_b)]
 +                 =  [beta_b dH_b(x_a) + [beta_a dH_a(x_b) +
 +                    beta_b (H_a(x_a) - H_b(x_b)]) - beta_a (H_a(x_a) - H_b(x_b))
 +                 =  beta_b dH_b(x_a) + beta_a dH_a(x_b) - (beta_b - beta_a)(H_b(x_b) - H_a(x_a) */
 +        /* delta = beta[b]*de[b][a] + beta[a]*de[a][b] - (beta[b] - beta[a])*(Epot[b] - Epot[a]; */
 +        /* permuted (big breath!) */
 +        /*   delta =  reduced E_new - reduced E_old
 +                 =  [beta_bp H_bp(x_a) + beta_ap H_ap(x_b)] - [beta_bp H_bp(x_b) + beta_ap H_ap(x_a)]
 +                 =  [beta_bp H_bp(x_a) - beta_ap H_ap(x_a)] + [beta_ap H_ap(x_b) - beta_bp H_bp(x_b)]
 +                 =  [beta_bp H_bp(x_a) - beta_ap H_ap(x_a)] + [beta_ap H_ap(x_b) - beta_bp H_bp(x_b)]
 +                    - beta_pb H_a(x_a) + beta_ap H_a(x_a) + beta_pb H_a(x_a) - beta_ap H_a(x_a)
 +                    - beta_ap H_b(x_b) + beta_bp H_b(x_b) + beta_ap H_b(x_b) - beta_bp H_b(x_b)
 +                 =  [(beta_bp H_bp(x_a) - beta_bp H_a(x_a)) - (beta_ap H_ap(x_a) - beta_ap H_a(x_a))] +
 +                    [(beta_ap H_ap(x_b)  - beta_ap H_b(x_b)) - (beta_bp H_bp(x_b) - beta_bp H_b(x_b))]
 +                    + beta_pb H_a(x_a) - beta_ap H_a(x_a) + beta_ap H_b(x_b) - beta_bp H_b(x_b)
 +                 =  [beta_bp (H_bp(x_a) - H_a(x_a)) - beta_ap (H_ap(x_a) - H_a(x_a))] +
 +                    [beta_ap (H_ap(x_b) - H_b(x_b)) - beta_bp (H_bp(x_b) - H_b(x_b))]
 +                    + beta_pb (H_a(x_a) - H_b(x_b))  - beta_ap (H_a(x_a) - H_b(x_b))
 +                 =  ([beta_bp de[bp][a] - beta_ap de[ap][a]) + beta_ap de[ap][b]  - beta_bp de[bp][b])
 +                 + (beta_pb-beta_ap)(H_a(x_a) - H_b(x_b))  */
 +        delta = beta[bp]*(de[bp][a] - de[bp][b]) + beta[ap]*(de[ap][b] - de[ap][a]) - (beta[bp]-beta[ap])*(Epot[b]-Epot[a]);
 +        break;
 +    default:
 +        gmx_incons("Unknown replica exchange quantity");
 +    }
 +    if (bPrint)
 +    {
 +        fprintf(fplog,"Repl %d <-> %d  dE_term = %10.3e (kT)\n",a,b,delta);
 +    }
 +    if (re->bNPT)
 +    {
 +        /* revisit the calculation for 5.0.  Might be some improvements. */
 +        dpV = (beta[ap]*re->pres[ap]-beta[bp]*re->pres[bp])*(Vol[b]-Vol[a])/PRESFAC;
 +        if (bPrint) 
 +        {
 +            fprintf(fplog,"  dpV = %10.3e  d = %10.3e\nb",dpV,delta + dpV);
 +        }
 +        delta += dpV;
 +    }
 +    return delta;
 +}
 +
 +static void
 +test_for_replica_exchange(FILE *fplog,
 +                          const gmx_multisim_t *ms,
 +                          struct gmx_repl_ex *re,
 +                          gmx_enerdata_t *enerd,
 +                          real vol,
 +                          gmx_large_int_t step,
 +                          real time)
 +{
 +    int  m,i,j,a,b,ap,bp,i0,i1,tmp;
 +    real ediff=0,delta=0,dpV=0;
 +    gmx_bool bPrint,bMultiEx;
 +    gmx_bool *bEx = re->bEx;
 +    real *prob = re->prob;
-             /* find out which state it is from, and what label that state currently has */
++    int *pind = re->destinations; /* permuted index */
 +    gmx_bool bEpot=FALSE;
 +    gmx_bool bDLambda=FALSE;
 +    gmx_bool bVol=FALSE;
 +
 +    bMultiEx = (re->nex > 1);  /* multiple exchanges at each state */
 +    fprintf(fplog,"Replica exchange at step " gmx_large_int_pfmt " time %g\n",step,time);
 +
 +    if (re->bNPT)
 +    {
 +        for (i=0;i<re->nrepl;i++)
 +        {
 +            re->Vol[i] = 0;
 +        }
 +        bVol = TRUE;
 +        re->Vol[re->repl]  = vol;
 +    }
 +    if ((re->type == ereTEMP || re->type == ereTL))
 +    {
 +        for (i=0;i<re->nrepl;i++)
 +        {
 +            re->Epot[i] = 0;
 +        }
 +        bEpot = TRUE;
 +        re->Epot[re->repl] = enerd->term[F_EPOT];
 +        /* temperatures of different states*/
 +        for (i=0;i<re->nrepl;i++)
 +        {
 +            re->beta[i] = 1.0/(re->q[ereTEMP][i]*BOLTZ);
 +        }
 +    }
 +    else
 +    {
 +        for (i=0;i<re->nrepl;i++)
 +        {
 +            re->beta[i] = 1.0/(re->temp*BOLTZ);  /* we have a single temperature */
 +        }
 +    }
 +    if (re->type == ereLAMBDA || re->type == ereTL)
 +    {
 +        bDLambda = TRUE;
 +        /* lambda differences. */
 +        /* de[i][j] is the energy of the jth simulation in the ith Hamiltonian
 +           minus the energy of the jth simulation in the jth Hamiltonian */
 +        for (i=0;i<re->nrepl;i++)
 +        {
 +            for (j=0;j<re->nrepl;j++)
 +            {
 +                re->de[i][j] = 0;
 +            }
 +        }
 +        for (i=0;i<re->nrepl;i++)
 +        {
 +            re->de[i][re->repl] = (enerd->enerpart_lambda[(int)re->q[ereLAMBDA][i]+1]-enerd->enerpart_lambda[0]);
 +        }
 +    }
 +
 +    /* now actually do the communication */
 +    if (bVol)
 +    {
 +        gmx_sum_sim(re->nrepl,re->Vol,ms);
 +    }
 +    if (bEpot)
 +    {
 +        gmx_sum_sim(re->nrepl,re->Epot,ms);
 +    }
 +    if (bDLambda)
 +    {
 +        for (i=0;i<re->nrepl;i++)
 +        {
 +            gmx_sum_sim(re->nrepl,re->de[i],ms);
 +        }
 +    }
 +
 +    /* make a duplicate set of indices for shuffling */
 +    for(i=0;i<re->nrepl;i++)
 +    {
 +        pind[i] = re->ind[i];
 +    }
 +
 +    if (bMultiEx)
 +    {
 +        /* multiple random switch exchange */
 +        for (i=0;i<re->nex;i++)
 +        {
 +            /* randomly select a pair  */
-                 continue;  /* got the same pair, back up and do it again */
++            /* in theory, could reduce this by identifying only which switches had a non-negligible
++               probability of occurring (log p > -100) and only operate on those switches */
++            /* find out which state it is from, and what label that state currently has. Likely
++               more work than useful. */
 +            i0 = (int)(re->nrepl*rando(&(re->seed)));
 +            i1 = (int)(re->nrepl*rando(&(re->seed)));
 +            if (i0==i1)
 +            {
 +                i--;
-             a = re->ind[i0];
++                continue;  /* self-exchange, back up and do it again */
 +            }
 +
-             delta = calc_delta(fplog,bPrint,re,a,b,ap,bp); /* calculate the energy difference */
++            a = re->ind[i0]; /* what are the indices of these states? */
 +            b = re->ind[i1];
 +            ap = pind[i0];
 +            bp = pind[i1];
 +
 +            bPrint = FALSE; /* too noisy */
-             /* we actually only use the first space, since there are actually many switches between pairs. */
++            /* calculate the energy difference */
++            /* if the code changes to flip the STATES, rather than the configurations,
++               use the commented version of the code */
++            /* delta = calc_delta(fplog,bPrint,re,a,b,ap,bp); */
++            delta = calc_delta(fplog,bPrint,re,ap,bp,a,b);
 +
++            /* we actually only use the first space in the prob and bEx array,
++               since there are actually many switches between pairs. */
 +
 +            if (delta <= 0)
 +            {
 +                /* accepted */
 +                prob[0] = 1;
 +                bEx[0] = TRUE;
 +            }
 +            else
 +            {
 +                if (delta > PROBABILITYCUTOFF)
 +                {
 +                    prob[0] = 0;
 +                }
 +                else
 +                {
 +                    prob[0] = exp(-delta);
 +                }
 +                /* roll a number to determine if accepted */
 +                bEx[0] = (rando(&(re->seed)) < prob[0]);
 +            }
 +            re->prob_sum[0] += prob[0];
 +
 +            if (bEx[0])
 +            {
 +                /* swap the states */
 +                tmp = pind[i0];
 +                pind[i0] = pind[i1];
 +                pind[i1] = tmp;
 +            }
 +        }
 +        re->nattempt[0]++;  /* keep track of total permutation trials here */
 +        print_allswitchind(fplog,re->nrepl,re->ind,pind,re->allswaps,re->tmpswap);
 +    }
 +    else
 +    {
 +        /* standard nearest neighbor replica exchange */
 +        m = (step / re->nst) % 2;
 +        for(i=1; i<re->nrepl; i++)
 +        {
 +            a = re->ind[i-1];
 +            b = re->ind[i];
 +            
 +            bPrint = (re->repl==a || re->repl==b);
 +            if (i % 2 == m)
 +            {
 +                delta = calc_delta(fplog,bPrint,re,a,b,a,b);
 +                if (delta <= 0) {
 +                    /* accepted */
 +                    prob[i] = 1;
 +                    bEx[i] = TRUE;
 +                }
 +                else
 +                {
 +                    if (delta > PROBABILITYCUTOFF)
 +                    {
 +                        prob[i] = 0;
 +                    }
 +                    else
 +                    {
 +                        prob[i] = exp(-delta);
 +                    }
 +                    /* roll a number to determine if accepted */
 +                    bEx[i] = (rando(&(re->seed)) < prob[i]);
 +                }
 +                re->prob_sum[i] += prob[i];
 +
 +                if (bEx[i])
 +                {
 +                    /* swap these two */
 +                    tmp = pind[i-1];
 +                    pind[i-1] = pind[i];
 +                    pind[i] = tmp;
 +                    re->nexchange[i]++;  /* statistics for back compatibility */
 +                }
 +            }
 +            else
 +            {
 +                prob[i] = -1;
 +                bEx[i] = FALSE;
 +            }
 +        }
 +        /* print some statistics */
 +        print_ind(fplog,"ex",re->nrepl,re->ind,bEx);
 +        print_prob(fplog,"pr",re->nrepl,prob);
 +        fprintf(fplog,"\n");
 +        re->nattempt[m]++;
 +    }
 +
 +    /* record which moves were made and accepted */
 +    for (i=0;i<re->nrepl;i++)
 +    {
 +        re->nmoves[re->ind[i]][pind[i]] +=1;
 +        re->nmoves[pind[i]][re->ind[i]] +=1;
 +    }
++    fflush(fplog); /* make sure we can see what the last exchange was */
 +}
 +
 +static void write_debug_x(t_state *state)
 +{
 +    int i;
 +
 +    if (debug)
 +    {
 +        for(i=0; i<state->natoms; i+=10)
 +        {
 +            fprintf(debug,"dx %5d %10.5f %10.5f %10.5f\n",i,state->x[i][XX],state->x[i][YY],state->x[i][ZZ]);
 +        }
 +    }
 +}
 +
 +static void
 +cyclic_decomposition(FILE *fplog,
 +                     const int *destinations,
 +                     int **cyclic,
 +                     gmx_bool *incycle,
 +                     const int nrepl,
 +                     int *nswap)
 +{
 +
 +    int i,j,c,p;
 +    int maxlen = 1;
 +    for (i=0;i<nrepl;i++)
 +    {
 +        incycle[i] = FALSE;
 +    }
 +    for (i=0;i<nrepl;i++)  /* one cycle for each replica */
 +    {
 +        if (incycle[i])
 +        {
 +            cyclic[i][0] = -1;
 +            continue;
 +        }
 +        cyclic[i][0] = i;
 +        incycle[i] = TRUE;
 +        c = 1;
 +        p = i;
 +        for (j=0;j<nrepl;j++)  /* potentially all cycles are part, but we will break first */
 +        {
 +            p = destinations[p]; /* start permuting */
 +            if (p==i)
 +            {
 +                cyclic[i][c] = -1;
 +                if (c > maxlen)
 +                {
 +                    maxlen = c;
 +                }
 +                break; /* we've reached the original element, the cycle is complete, and we marked the end. */
 +            }
 +            else
 +            {
 +                cyclic[i][c] = p;  /* each permutation gives a new member of the cycle */
 +                incycle[p] = TRUE;
 +                c++;
 +            }
 +        }
 +    }
 +    *nswap = maxlen - 1;
 +
 +    if (debug)
 +    {
 +        for (i=0;i<nrepl;i++)
 +        {
 +            fprintf(debug,"Cycle %d:",i);
 +            for (j=0;j<nrepl;j++)
 +            {
 +                if (cyclic[i][j] < 0)
 +                {
 +                    break;
 +                }
 +                fprintf(debug,"%2d",cyclic[i][j]);
 +            }
 +            fprintf(debug,"\n");
 +        }
 +        fflush(debug);
 +    }
 +}
 +
 +static void
 +compute_exchange_order(FILE *fplog,
 +                       int **cyclic,
 +                       int **order,
 +                       const int nrepl,
 +                       const int maxswap)
 +{
 +    int i,j;
 +
 +    for (j=0;j<maxswap;j++)
 +    {
 +        for (i=0;i<nrepl;i++)
 +        {
 +            if (cyclic[i][j+1] >= 0)
 +            {
 +                order[cyclic[i][j+1]][j] = cyclic[i][j];
 +                order[cyclic[i][j]][j] = cyclic[i][j+1];
 +            }
 +        }
 +        for (i=0;i<nrepl;i++)
 +        {
 +            if (order[i][j] < 0)
 +            {
 +                order[i][j] = i; /* if it's not exchanging, it should stay this round*/
 +            }
 +        }
 +    }
 +
 +    if (debug)
 +    {
 +        fprintf(fplog,"Replica Exchange Order\n");
 +        for (i=0;i<nrepl;i++)
 +        {
 +            fprintf(fplog,"Replica %d:",i);
 +            for (j=0;j<maxswap;j++)
 +            {
 +                if (order[i][j] < 0) break;
 +                fprintf(debug,"%2d",order[i][j]);
 +            }
 +            fprintf(fplog,"\n");
 +        }
 +        fflush(fplog);
 +    }
 +}
 +
 +static void
 +prepare_to_do_exchange(FILE *fplog,
 +                       const int *destinations,
 +                       const int replica_id,
 +                       const int nrepl,
 +                       int *maxswap,
 +                       int **order,
 +                       int **cyclic,
 +                       int *incycle,
 +                       gmx_bool *bThisReplicaExchanged)
 +{
 +    int i,j;
 +    /* Hold the cyclic decomposition of the (multiple) replica
 +     * exchange. */
 +    gmx_bool bAnyReplicaExchanged = FALSE;
 +    *bThisReplicaExchanged = FALSE;
 +
 +    for (i = 0; i < nrepl; i++)
 +    {
 +        if (destinations[i] != i) {
 +            /* only mark as exchanged if the index has been shuffled */
 +            bAnyReplicaExchanged = TRUE;
 +            break;
 +        }
 +    }
 +    if (bAnyReplicaExchanged)
 +    {
 +        /* reinitialize the placeholder arrays */
 +        for (i = 0; i < nrepl; i++)
 +        {
 +            for (j = 0; j < nrepl; j++)
 +            {
 +                cyclic[i][j] = -1;
 +                order[i][j] = -1;
 +            }
 +        }
 +
 +        /* Identify the cyclic decomposition of the permutation (very
 +         * fast if neighbor replica exchange). */
 +        cyclic_decomposition(fplog,destinations,cyclic,incycle,nrepl,maxswap);
 +
 +        /* Now translate the decomposition into a replica exchange
 +         * order at each step. */
 +        compute_exchange_order(fplog,cyclic,order,nrepl,*maxswap);
 +
 +        /* Did this replica do any exchange at any point? */
 +        for (j = 0; j < *maxswap; j++)
 +        {
 +            if (replica_id != order[replica_id][j])
 +            {
 +                *bThisReplicaExchanged = TRUE;
 +                break;
 +            }
 +        }
 +    }
 +}
 +
 +gmx_bool replica_exchange(FILE *fplog,const t_commrec *cr,struct gmx_repl_ex *re,
 +                          t_state *state,gmx_enerdata_t *enerd,
 +                          t_state *state_local,gmx_large_int_t step,real time)
 +{
 +    int i,j;
 +    int replica_id = 0;
 +    int exchange_partner;
 +    int maxswap = 0;
 +    /* Number of rounds of exchanges needed to deal with any multiple
 +     * exchanges. */
 +    /* Where each replica ends up after the exchange attempt(s). */
 +    /* The order in which multiple exchanges will occur. */
 +    gmx_bool bThisReplicaExchanged = FALSE;
 +
 +    if (MASTER(cr))
 +    {
 +        replica_id  = re->repl;
 +        test_for_replica_exchange(fplog,cr->ms,re,enerd,det(state_local->box),step,time);
 +        prepare_to_do_exchange(fplog,re->destinations,replica_id,re->nrepl,&maxswap,
 +                               re->order,re->cyclic,re->incycle,&bThisReplicaExchanged);
 +    }
 +    /* Do intra-simulation broadcast so all processors belonging to
 +     * each simulation know whether they need to participate in
 +     * collecting the state. Otherwise, they might as well get on with
 +     * the next thing to do. */
 +    if (PAR(cr))
 +    {
 +#ifdef GMX_MPI
 +        MPI_Bcast(&bThisReplicaExchanged,sizeof(gmx_bool),MPI_BYTE,MASTERRANK(cr),
 +                  cr->mpi_comm_mygroup);
 +#endif
 +    }
 +
 +    if (bThisReplicaExchanged)
 +    {
 +        /* Exchange the states */
 +
 +        if (PAR(cr))
 +        {
 +            /* Collect the global state on the master node */
 +            if (DOMAINDECOMP(cr))
 +            {
 +                dd_collect_state(cr->dd,state_local,state);
 +            }
 +            else
 +            {
 +                pd_collect_state(cr,state);
 +            }
 +        }
 +        
 +        if (MASTER(cr))
 +        {
 +            /* There will be only one swap cycle with standard replica
 +             * exchange, but there may be multiple swap cycles if we
 +             * allow multiple swaps. */
++
 +            for (j = 0; j < maxswap; j++)
 +            {
 +                exchange_partner = re->order[replica_id][j];
 +
 +                if (exchange_partner != replica_id)
 +                {
 +                    /* Exchange the global states between the master nodes */
 +                    if (debug)
 +                    {
 +                        fprintf(debug,"Exchanging %d with %d\n",replica_id,exchange_partner);
 +                    }
 +                    exchange_state(cr->ms,exchange_partner,state);
 +                }
 +            }
 +            /* For temperature-type replica exchange, we need to scale
 +             * the velocities. */
 +            if (re->type == ereTEMP || re->type == ereTL)
 +            {
 +                scale_velocities(state,sqrt(re->q[ereTEMP][replica_id]/re->q[ereTEMP][re->destinations[replica_id]]));
 +            }
 +
 +        }
 +
 +        /* With domain decomposition the global state is distributed later */
 +        if (!DOMAINDECOMP(cr))
 +        {
 +            /* Copy the global state to the local state data structure */
 +            copy_state_nonatomdata(state,state_local);
 +            
 +            if (PAR(cr))
 +            {
 +                bcast_state(cr,state,FALSE);
 +            }
 +        }
 +    }
 +
 +    return bThisReplicaExchanged;
 +}
 +
 +void print_replica_exchange_statistics(FILE *fplog,struct gmx_repl_ex *re)
 +{
 +    int  i;
 +
 +    fprintf(fplog,"\nReplica exchange statistics\n");
 +
 +    if (re->nex == 0)
 +    {
 +        fprintf(fplog,"Repl  %d attempts, %d odd, %d even\n",
 +                re->nattempt[0]+re->nattempt[1],re->nattempt[1],re->nattempt[0]);
 +
 +        fprintf(fplog,"Repl  average probabilities:\n");
 +        for(i=1; i<re->nrepl; i++)
 +        {
 +            if (re->nattempt[i%2] == 0)
 +            {
 +                re->prob[i] = 0;
 +            }
 +            else
 +            {
 +                re->prob[i] =  re->prob_sum[i]/re->nattempt[i%2];
 +            }
 +        }
 +        print_ind(fplog,"",re->nrepl,re->ind,NULL);
 +        print_prob(fplog,"",re->nrepl,re->prob);
 +
 +        fprintf(fplog,"Repl  number of exchanges:\n");
 +        print_ind(fplog,"",re->nrepl,re->ind,NULL);
 +        print_count(fplog,"",re->nrepl,re->nexchange);
 +
 +        fprintf(fplog,"Repl  average number of exchanges:\n");
 +        for(i=1; i<re->nrepl; i++) 
 +        {
 +            if (re->nattempt[i%2] == 0)
 +            {
 +                re->prob[i] = 0;
 +            }
 +            else
 +            {
 +                re->prob[i] =  ((real)re->nexchange[i])/re->nattempt[i%2];
 +            }
 +        }
 +        print_ind(fplog,"",re->nrepl,re->ind,NULL);
 +        print_prob(fplog,"",re->nrepl,re->prob);
 +
 +        fprintf(fplog,"\n");
 +    }
 +    /* print the transition matrix */
 +    print_transition_matrix(fplog,"",re->nrepl,re->nmoves,re->nattempt);
 +}
index 61bab7e51a954cbb3bc5acba7f698257f68d92d0,0000000000000000000000000000000000000000..86db8f722534ce8ec808b8ff59db8c6dde2a3f28
mode 100644,000000..100644
--- /dev/null
@@@ -1,1996 -1,0 +1,2002 @@@
-     fprintf(stderr, "Starting %d tMPI threads\n",hw_opt->nthreads_tmpi);
-     fflush(stderr);
 +/* -*- mode: c; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4; c-file-style: "stroustrup"; -*-
 + *
 + * 
 + *                This source code is part of
 + * 
 + *                 G   R   O   M   A   C   S
 + * 
 + *          GROningen MAchine for Chemical Simulations
 + * 
 + *                        VERSION 3.2.0
 + * Written by David van der Spoel, Erik Lindahl, Berk Hess, and others.
 + * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 + * Copyright (c) 2001-2004, The GROMACS development team,
 + * check out http://www.gromacs.org for more information.
 +
 + * This program is free software; you can redistribute it and/or
 + * modify it under the terms of the GNU General Public License
 + * as published by the Free Software Foundation; either version 2
 + * of the License, or (at your option) any later version.
 + * 
 + * If you want to redistribute modifications, please consider that
 + * scientific software is very special. Version control is crucial -
 + * bugs must be traceable. We will be happy to consider code for
 + * inclusion in the official distribution, but derived work must not
 + * be called official GROMACS. Details are found in the README & COPYING
 + * files - if they are missing, get the official version at www.gromacs.org.
 + * 
 + * To help us fund GROMACS development, we humbly ask that you cite
 + * the papers on the package - you can find them in the top README file.
 + * 
 + * For more info, check our website at http://www.gromacs.org
 + * 
 + * And Hey:
 + * Gallium Rubidium Oxygen Manganese Argon Carbon Silicon
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +#if defined(HAVE_SCHED_H) && defined(HAVE_SCHED_GETAFFINITY)
 +#define _GNU_SOURCE
 +#include <sched.h>
 +#include <sys/syscall.h>
 +#endif
 +#include <signal.h>
 +#include <stdlib.h>
 +#ifdef HAVE_UNISTD_H
 +#include <unistd.h>
 +#endif
 +#include <string.h>
 +#include <assert.h>
 +
 +#include "typedefs.h"
 +#include "smalloc.h"
 +#include "sysstuff.h"
 +#include "statutil.h"
 +#include "mdrun.h"
 +#include "md_logging.h"
 +#include "md_support.h"
 +#include "network.h"
 +#include "pull.h"
 +#include "pull_rotation.h"
 +#include "names.h"
 +#include "disre.h"
 +#include "orires.h"
 +#include "pme.h"
 +#include "mdatoms.h"
 +#include "repl_ex.h"
 +#include "qmmm.h"
 +#include "domdec.h"
 +#include "partdec.h"
 +#include "coulomb.h"
 +#include "constr.h"
 +#include "mvdata.h"
 +#include "checkpoint.h"
 +#include "mtop_util.h"
 +#include "sighandler.h"
 +#include "tpxio.h"
 +#include "txtdump.h"
 +#include "gmx_detect_hardware.h"
 +#include "gmx_omp_nthreads.h"
 +#include "pull_rotation.h"
 +#include "calc_verletbuf.h"
 +#include "../mdlib/nbnxn_search.h"
 +#include "../mdlib/nbnxn_consts.h"
 +#include "gmx_fatal_collective.h"
 +#include "membed.h"
 +#include "macros.h"
 +#include "gmx_omp.h"
 +
 +#include "thread_mpi/threads.h"
 +
 +#ifdef GMX_LIB_MPI
 +#include <mpi.h>
 +#endif
 +#ifdef GMX_THREAD_MPI
 +#include "tmpi.h"
 +#endif
 +
 +#ifdef GMX_FAHCORE
 +#include "corewrap.h"
 +#endif
 +
 +#ifdef GMX_OPENMM
 +#include "md_openmm.h"
 +#endif
 +
 +#include "gpu_utils.h"
 +#include "nbnxn_cuda_data_mgmt.h"
 +
 +typedef struct { 
 +    gmx_integrator_t *func;
 +} gmx_intp_t;
 +
 +/* The array should match the eI array in include/types/enums.h */
 +#ifdef GMX_OPENMM  /* FIXME do_md_openmm needs fixing */
 +const gmx_intp_t integrator[eiNR] = { {do_md_openmm}, {do_md_openmm}, {do_md_openmm}, {do_md_openmm}, {do_md_openmm}, {do_md_openmm}, {do_md_openmm}, {do_md_openmm}, {do_md_openmm}, {do_md_openmm}, {do_md_openmm},{do_md_openmm}};
 +#else
 +const gmx_intp_t integrator[eiNR] = { {do_md}, {do_steep}, {do_cg}, {do_md}, {do_md}, {do_nm}, {do_lbfgs}, {do_tpi}, {do_tpi}, {do_md}, {do_md},{do_md}};
 +#endif
 +
 +gmx_large_int_t     deform_init_init_step_tpx;
 +matrix              deform_init_box_tpx;
 +#ifdef GMX_THREAD_MPI
 +tMPI_Thread_mutex_t deform_init_box_mutex=TMPI_THREAD_MUTEX_INITIALIZER;
 +#endif
 +
 +
 +#ifdef GMX_THREAD_MPI
 +struct mdrunner_arglist
 +{
 +    gmx_hw_opt_t *hw_opt;
 +    FILE *fplog;
 +    t_commrec *cr;
 +    int nfile;
 +    const t_filenm *fnm;
 +    output_env_t oenv;
 +    gmx_bool bVerbose;
 +    gmx_bool bCompact;
 +    int nstglobalcomm;
 +    ivec ddxyz;
 +    int dd_node_order;
 +    real rdd;
 +    real rconstr;
 +    const char *dddlb_opt;
 +    real dlb_scale;
 +    const char *ddcsx;
 +    const char *ddcsy;
 +    const char *ddcsz;
 +    const char *nbpu_opt;
 +    int nsteps_cmdline;
 +    int nstepout;
 +    int resetstep;
 +    int nmultisim;
 +    int repl_ex_nst;
 +    int repl_ex_nex;
 +    int repl_ex_seed;
 +    real pforce;
 +    real cpt_period;
 +    real max_hours;
 +    const char *deviceOptions;
 +    unsigned long Flags;
 +    int ret; /* return value */
 +};
 +
 +
 +/* The function used for spawning threads. Extracts the mdrunner() 
 +   arguments from its one argument and calls mdrunner(), after making
 +   a commrec. */
 +static void mdrunner_start_fn(void *arg)
 +{
 +    struct mdrunner_arglist *mda=(struct mdrunner_arglist*)arg;
 +    struct mdrunner_arglist mc=*mda; /* copy the arg list to make sure 
 +                                        that it's thread-local. This doesn't
 +                                        copy pointed-to items, of course,
 +                                        but those are all const. */
 +    t_commrec *cr;  /* we need a local version of this */
 +    FILE *fplog=NULL;
 +    t_filenm *fnm;
 +
 +    fnm = dup_tfn(mc.nfile, mc.fnm);
 +
 +    cr = init_par_threads(mc.cr);
 +
 +    if (MASTER(cr))
 +    {
 +        fplog=mc.fplog;
 +    }
 +
 +    mda->ret=mdrunner(mc.hw_opt, fplog, cr, mc.nfile, fnm, mc.oenv, 
 +                      mc.bVerbose, mc.bCompact, mc.nstglobalcomm, 
 +                      mc.ddxyz, mc.dd_node_order, mc.rdd,
 +                      mc.rconstr, mc.dddlb_opt, mc.dlb_scale, 
 +                      mc.ddcsx, mc.ddcsy, mc.ddcsz,
 +                      mc.nbpu_opt,
 +                      mc.nsteps_cmdline, mc.nstepout, mc.resetstep,
 +                      mc.nmultisim, mc.repl_ex_nst, mc.repl_ex_nex, mc.repl_ex_seed, mc.pforce, 
 +                      mc.cpt_period, mc.max_hours, mc.deviceOptions, mc.Flags);
 +}
 +
 +/* called by mdrunner() to start a specific number of threads (including 
 +   the main thread) for thread-parallel runs. This in turn calls mdrunner()
 +   for each thread. 
 +   All options besides nthreads are the same as for mdrunner(). */
 +static t_commrec *mdrunner_start_threads(gmx_hw_opt_t *hw_opt, 
 +              FILE *fplog,t_commrec *cr,int nfile, 
 +              const t_filenm fnm[], const output_env_t oenv, gmx_bool bVerbose,
 +              gmx_bool bCompact, int nstglobalcomm,
 +              ivec ddxyz,int dd_node_order,real rdd,real rconstr,
 +              const char *dddlb_opt,real dlb_scale,
 +              const char *ddcsx,const char *ddcsy,const char *ddcsz,
 +              const char *nbpu_opt,
 +              int nsteps_cmdline, int nstepout,int resetstep,
 +              int nmultisim,int repl_ex_nst,int repl_ex_nex, int repl_ex_seed,
 +              real pforce,real cpt_period, real max_hours, 
 +              const char *deviceOptions, unsigned long Flags)
 +{
 +    int ret;
 +    struct mdrunner_arglist *mda;
 +    t_commrec *crn; /* the new commrec */
 +    t_filenm *fnmn;
 +
 +    /* first check whether we even need to start tMPI */
 +    if (hw_opt->nthreads_tmpi < 2)
 +    {
 +        return cr;
 +    }
 +
 +    /* a few small, one-time, almost unavoidable memory leaks: */
 +    snew(mda,1);
 +    fnmn=dup_tfn(nfile, fnm);
 +
 +    /* fill the data structure to pass as void pointer to thread start fn */
 +    mda->hw_opt=hw_opt;
 +    mda->fplog=fplog;
 +    mda->cr=cr;
 +    mda->nfile=nfile;
 +    mda->fnm=fnmn;
 +    mda->oenv=oenv;
 +    mda->bVerbose=bVerbose;
 +    mda->bCompact=bCompact;
 +    mda->nstglobalcomm=nstglobalcomm;
 +    mda->ddxyz[XX]=ddxyz[XX];
 +    mda->ddxyz[YY]=ddxyz[YY];
 +    mda->ddxyz[ZZ]=ddxyz[ZZ];
 +    mda->dd_node_order=dd_node_order;
 +    mda->rdd=rdd;
 +    mda->rconstr=rconstr;
 +    mda->dddlb_opt=dddlb_opt;
 +    mda->dlb_scale=dlb_scale;
 +    mda->ddcsx=ddcsx;
 +    mda->ddcsy=ddcsy;
 +    mda->ddcsz=ddcsz;
 +    mda->nbpu_opt=nbpu_opt;
 +    mda->nsteps_cmdline=nsteps_cmdline;
 +    mda->nstepout=nstepout;
 +    mda->resetstep=resetstep;
 +    mda->nmultisim=nmultisim;
 +    mda->repl_ex_nst=repl_ex_nst;
 +    mda->repl_ex_nex=repl_ex_nex;
 +    mda->repl_ex_seed=repl_ex_seed;
 +    mda->pforce=pforce;
 +    mda->cpt_period=cpt_period;
 +    mda->max_hours=max_hours;
 +    mda->deviceOptions=deviceOptions;
 +    mda->Flags=Flags;
 +
- /* Check the process affinity mask and if it is found to be non-zero,
-  * will honor it and disable mdrun internal affinity setting.
-  * This function should be called first before the OpenMP library gets
-  * initialized with the last argument FALSE (which will detect affinity
-  * set by external tools like taskset), and later, after the OpenMP
-  * initialization, with the last argument TRUE to detect affinity changes
-  * made by the OpenMP library.
 +    /* now spawn new threads that start mdrunner_start_fn(), while 
 +       the main thread returns */
 +    ret=tMPI_Init_fn(TRUE, hw_opt->nthreads_tmpi,
 +                     (hw_opt->bThreadPinning ? TMPI_AFFINITY_ALL_CORES : TMPI_AFFINITY_NONE),
 +                     mdrunner_start_fn, (void*)(mda) );
 +    if (ret!=TMPI_SUCCESS)
 +        return NULL;
 +
 +    /* make a new comm_rec to reflect the new situation */
 +    crn=init_par_threads(cr);
 +    return crn;
 +}
 +
 +
 +static int get_tmpi_omp_thread_division(const gmx_hw_info_t *hwinfo,
 +                                        const gmx_hw_opt_t *hw_opt,
 +                                        int nthreads_tot,
 +                                        int ngpu)
 +{
 +    int nthreads_tmpi;
 +
 +    /* There are no separate PME nodes here, as we ensured in
 +     * check_and_update_hw_opt that nthreads_tmpi>0 with PME nodes
 +     * and a conditional ensures we would not have ended up here.
 +     * Note that separate PME nodes might be switched on later.
 +     */
 +    if (ngpu > 0)
 +    {
 +        nthreads_tmpi = ngpu;
 +        if (nthreads_tot > 0 && nthreads_tot < nthreads_tmpi)
 +        {
 +            nthreads_tmpi = nthreads_tot;
 +        }
 +    }
 +    else if (hw_opt->nthreads_omp > 0)
 +    {
 +        /* Here we could oversubscribe, when we do, we issue a warning later */
 +        nthreads_tmpi = max(1,nthreads_tot/hw_opt->nthreads_omp);
 +    }
 +    else
 +    {
 +        /* TODO choose nthreads_omp based on hardware topology
 +           when we have a hardware topology detection library */
 +        /* In general, when running up to 4 threads, OpenMP should be faster.
 +         * Note: on AMD Bulldozer we should avoid running OpenMP over two dies.
 +         * On Intel>=Nehalem running OpenMP on a single CPU is always faster,
 +         * even on two CPUs it's usually faster (but with many OpenMP threads
 +         * it could be faster not to use HT, currently we always use HT).
 +         * On Nehalem/Westmere we want to avoid running 16 threads over
 +         * two CPUs with HT, so we need a limit<16; thus we use 12.
 +         * A reasonable limit for Intel Sandy and Ivy bridge,
 +         * not knowing the topology, is 16 threads.
 +         */
 +        const int nthreads_omp_always_faster             =  4;
 +        const int nthreads_omp_always_faster_Nehalem     = 12;
 +        const int nthreads_omp_always_faster_SandyBridge = 16;
 +        const int first_model_Nehalem     = 0x1A;
 +        const int first_model_SandyBridge = 0x2A;
 +        gmx_bool bIntel_Family6;
 +
 +        bIntel_Family6 =
 +            (gmx_cpuid_vendor(hwinfo->cpuid_info) == GMX_CPUID_VENDOR_INTEL &&
 +             gmx_cpuid_family(hwinfo->cpuid_info) == 6);
 +
 +        if (nthreads_tot <= nthreads_omp_always_faster ||
 +            (bIntel_Family6 &&
 +             ((gmx_cpuid_model(hwinfo->cpuid_info) >= nthreads_omp_always_faster_Nehalem && nthreads_tot <= nthreads_omp_always_faster_Nehalem) ||
 +              (gmx_cpuid_model(hwinfo->cpuid_info) >= nthreads_omp_always_faster_SandyBridge && nthreads_tot <= nthreads_omp_always_faster_SandyBridge))))
 +        {
 +            /* Use pure OpenMP parallelization */
 +            nthreads_tmpi = 1;
 +        }
 +        else
 +        {
 +            /* Don't use OpenMP parallelization */
 +            nthreads_tmpi = nthreads_tot;
 +        }
 +    }
 +
 +    return nthreads_tmpi;
 +}
 +
 +
 +/* Get the number of threads to use for thread-MPI based on how many
 + * were requested, which algorithms we're using,
 + * and how many particles there are.
 + * At the point we have already called check_and_update_hw_opt.
 + * Thus all options should be internally consistent and consistent
 + * with the hardware, except that ntmpi could be larger than #GPU.
 + */
 +static int get_nthreads_mpi(gmx_hw_info_t *hwinfo,
 +                            gmx_hw_opt_t *hw_opt,
 +                            t_inputrec *inputrec, gmx_mtop_t *mtop,
 +                            const t_commrec *cr,
 +                            FILE *fplog)
 +{
 +    int nthreads_hw,nthreads_tot_max,nthreads_tmpi,nthreads_new,ngpu;
 +    int min_atoms_per_mpi_thread;
 +    char *env;
 +    char sbuf[STRLEN];
 +    gmx_bool bCanUseGPU;
 +
 +    if (hw_opt->nthreads_tmpi > 0)
 +    {
 +        /* Trivial, return right away */
 +        return hw_opt->nthreads_tmpi;
 +    }
 +
 +    nthreads_hw = hwinfo->nthreads_hw_avail;
 +
 +    /* How many total (#tMPI*#OpenMP) threads can we start? */ 
 +    if (hw_opt->nthreads_tot > 0)
 +    {
 +        nthreads_tot_max = hw_opt->nthreads_tot;
 +    }
 +    else
 +    {
 +        nthreads_tot_max = nthreads_hw;
 +    }
 +
 +    bCanUseGPU = (inputrec->cutoff_scheme == ecutsVERLET && hwinfo->bCanUseGPU);
 +    if (bCanUseGPU)
 +    {
 +        ngpu = hwinfo->gpu_info.ncuda_dev_use;
 +    }
 +    else
 +    {
 +        ngpu = 0;
 +    }
 +
 +    nthreads_tmpi =
 +        get_tmpi_omp_thread_division(hwinfo,hw_opt,nthreads_tot_max,ngpu);
 +
 +    if (inputrec->eI == eiNM || EI_TPI(inputrec->eI))
 +    {
 +        /* Steps are divided over the nodes iso splitting the atoms */
 +        min_atoms_per_mpi_thread = 0;
 +    }
 +    else
 +    {
 +        if (bCanUseGPU)
 +        {
 +            min_atoms_per_mpi_thread = MIN_ATOMS_PER_GPU;
 +        }
 +        else
 +        {
 +            min_atoms_per_mpi_thread = MIN_ATOMS_PER_MPI_THREAD;
 +        }
 +    }
 +
 +    /* Check if an algorithm does not support parallel simulation.  */
 +    if (nthreads_tmpi != 1 &&
 +        ( inputrec->eI == eiLBFGS ||
 +          inputrec->coulombtype == eelEWALD ) )
 +    {
 +        nthreads_tmpi = 1;
 +
 +        md_print_warn(cr,fplog,"The integration or electrostatics algorithm doesn't support parallel runs. Using a single thread-MPI thread.\n");
 +        if (hw_opt->nthreads_tmpi > nthreads_tmpi)
 +        {
 +            gmx_fatal(FARGS,"You asked for more than 1 thread-MPI thread, but an algorithm doesn't support that");
 +        }
 +    }
 +    else if (mtop->natoms/nthreads_tmpi < min_atoms_per_mpi_thread)
 +    {
 +        /* the thread number was chosen automatically, but there are too many
 +           threads (too few atoms per thread) */
 +        nthreads_new = max(1,mtop->natoms/min_atoms_per_mpi_thread);
 +
 +        /* Avoid partial use of Hyper-Threading */
 +        if (gmx_cpuid_x86_smt(hwinfo->cpuid_info) == GMX_CPUID_X86_SMT_ENABLED &&
 +            nthreads_new > nthreads_hw/2 && nthreads_new < nthreads_hw)
 +        {
 +            nthreads_new = nthreads_hw/2;
 +        }
 +
 +        /* Avoid large prime numbers in the thread count */
 +        if (nthreads_new >= 6)
 +        {
 +            /* Use only 6,8,10 with additional factors of 2 */
 +            int fac;
 +
 +            fac = 2;
 +            while (3*fac*2 <= nthreads_new)
 +            {
 +                fac *= 2;
 +            }
 +
 +            nthreads_new = (nthreads_new/fac)*fac;
 +        }
 +        else
 +        {
 +            /* Avoid 5 */
 +            if (nthreads_new == 5)
 +            {
 +                nthreads_new = 4;
 +            }
 +        }
 +
 +        nthreads_tmpi = nthreads_new;
 +
 +        fprintf(stderr,"\n");
 +        fprintf(stderr,"NOTE: Parallelization is limited by the small number of atoms,\n");
 +        fprintf(stderr,"      only starting %d thread-MPI threads.\n",nthreads_tmpi);
 +        fprintf(stderr,"      You can use the -nt and/or -ntmpi option to optimize the number of threads.\n\n");
 +    }
 +
 +    return nthreads_tmpi;
 +}
 +#endif /* GMX_THREAD_MPI */
 +
 +
 +/* Environment variable for setting nstlist */
 +static const char*  NSTLIST_ENVVAR          =  "GMX_NSTLIST";
 +/* Try to increase nstlist when using a GPU with nstlist less than this */
 +static const int    NSTLIST_GPU_ENOUGH      = 20;
 +/* Increase nstlist until the non-bonded cost increases more than this factor */
 +static const float  NBNXN_GPU_LIST_OK_FAC   = 1.25;
 +/* Don't increase nstlist beyond a non-bonded cost increases of this factor */
 +static const float  NBNXN_GPU_LIST_MAX_FAC  = 1.40;
 +
 +/* Try to increase nstlist when running on a GPU */
 +static void increase_nstlist(FILE *fp,t_commrec *cr,
 +                             t_inputrec *ir,const gmx_mtop_t *mtop,matrix box)
 +{
 +    char *env;
 +    int  nstlist_orig,nstlist_prev;
 +    verletbuf_list_setup_t ls;
 +    real rlist_inc,rlist_ok,rlist_max,rlist_new,rlist_prev;
 +    int  i;
 +    t_state state_tmp;
 +    gmx_bool bBox,bDD,bCont;
 +    const char *nstl_fmt="\nFor optimal performance with a GPU nstlist (now %d) should be larger.\nThe optimum depends on your CPU and GPU resources.\nYou might want to try several nstlist values.\n";
 +    const char *vbd_err="Can not increase nstlist for GPU run because verlet-buffer-drift is not set or used";
 +    const char *box_err="Can not increase nstlist for GPU run because the box is too small";
 +    const char *dd_err ="Can not increase nstlist for GPU run because of domain decomposition limitations";
 +    char buf[STRLEN];
 +
 +    /* Number of + nstlist alternative values to try when switching  */
 +    const int nstl[]={ 20, 25, 40, 50 };
 +#define NNSTL  sizeof(nstl)/sizeof(nstl[0])
 +
 +    env = getenv(NSTLIST_ENVVAR);
 +    if (env == NULL)
 +    {
 +        if (fp != NULL)
 +        {
 +            fprintf(fp,nstl_fmt,ir->nstlist);
 +        }
 +    }
 +
 +    if (ir->verletbuf_drift == 0)
 +    {
 +        gmx_fatal(FARGS,"You are using an old tpr file with a GPU, please generate a new tpr file with an up to date version of grompp");
 +    }
 +
 +    if (ir->verletbuf_drift < 0)
 +    {
 +        if (MASTER(cr))
 +        {
 +            fprintf(stderr,"%s\n",vbd_err);
 +        }
 +        if (fp != NULL)
 +        {
 +            fprintf(fp,"%s\n",vbd_err);
 +        }
 +
 +        return;
 +    }
 +
 +    nstlist_orig = ir->nstlist;
 +    if (env != NULL)
 +    {
 +        sprintf(buf,"Getting nstlist from environment variable GMX_NSTLIST=%s",env);
 +        if (MASTER(cr))
 +        {
 +            fprintf(stderr,"%s\n",buf);
 +        }
 +        if (fp != NULL)
 +        {
 +            fprintf(fp,"%s\n",buf);
 +        }
 +        sscanf(env,"%d",&ir->nstlist);
 +    }
 +
 +    verletbuf_get_list_setup(TRUE,&ls);
 +
 +    /* Allow rlist to make the list double the size of the cut-off sphere */
 +    rlist_inc = nbnxn_get_rlist_effective_inc(NBNXN_GPU_CLUSTER_SIZE,mtop->natoms/det(box));
 +    rlist_ok  = (max(ir->rvdw,ir->rcoulomb) + rlist_inc)*pow(NBNXN_GPU_LIST_OK_FAC,1.0/3.0) - rlist_inc;
 +    rlist_max = (max(ir->rvdw,ir->rcoulomb) + rlist_inc)*pow(NBNXN_GPU_LIST_MAX_FAC,1.0/3.0) - rlist_inc;
 +    if (debug)
 +    {
 +        fprintf(debug,"GPU nstlist tuning: rlist_inc %.3f rlist_max %.3f\n",
 +                rlist_inc,rlist_max);
 +    }
 +
 +    i = 0;
 +    nstlist_prev = nstlist_orig;
 +    rlist_prev   = ir->rlist;
 +    do
 +    {
 +        if (env == NULL)
 +        {
 +            ir->nstlist = nstl[i];
 +        }
 +
 +        /* Set the pair-list buffer size in ir */
 +        calc_verlet_buffer_size(mtop,det(box),ir,ir->verletbuf_drift,&ls,
 +                                NULL,&rlist_new);
 +
 +        /* Does rlist fit in the box? */
 +        bBox = (sqr(rlist_new) < max_cutoff2(ir->ePBC,box));
 +        bDD  = TRUE;
 +        if (bBox && DOMAINDECOMP(cr))
 +        {
 +            /* Check if rlist fits in the domain decomposition */
 +            if (inputrec2nboundeddim(ir) < DIM)
 +            {
 +                gmx_incons("Changing nstlist with domain decomposition and unbounded dimensions is not implemented yet");
 +            }
 +            copy_mat(box,state_tmp.box);
 +            bDD = change_dd_cutoff(cr,&state_tmp,ir,rlist_new);
 +        }
 +
 +        bCont = FALSE;
 +
 +        if (env == NULL)
 +        {
 +            if (bBox && bDD && rlist_new <= rlist_max)
 +            {
 +                /* Increase nstlist */
 +                nstlist_prev = ir->nstlist;
 +                rlist_prev   = rlist_new;
 +                bCont = (i+1 < NNSTL && rlist_new < rlist_ok);
 +            }
 +            else
 +            {
 +                /* Stick with the previous nstlist */
 +                ir->nstlist = nstlist_prev;
 +                rlist_new   = rlist_prev;
 +                bBox = TRUE;
 +                bDD  = TRUE;
 +            }
 +        }
 +
 +        i++;
 +    }
 +    while (bCont);
 +
 +    if (!bBox || !bDD)
 +    {
 +        gmx_warning(!bBox ? box_err : dd_err);
 +        if (fp != NULL)
 +        {
 +            fprintf(fp,"\n%s\n",bBox ? box_err : dd_err);
 +        }
 +        ir->nstlist = nstlist_orig;
 +    }
 +    else if (ir->nstlist != nstlist_orig || rlist_new != ir->rlist)
 +    {
 +        sprintf(buf,"Changing nstlist from %d to %d, rlist from %g to %g",
 +                nstlist_orig,ir->nstlist,
 +                ir->rlist,rlist_new);
 +        if (MASTER(cr))
 +        {
 +            fprintf(stderr,"%s\n\n",buf);
 +        }
 +        if (fp != NULL)
 +        {
 +            fprintf(fp,"%s\n\n",buf);
 +        }
 +        ir->rlist     = rlist_new;
 +        ir->rlistlong = rlist_new;
 +    }
 +}
 +
 +static void prepare_verlet_scheme(FILE *fplog,
 +                                  gmx_hw_info_t *hwinfo,
 +                                  t_commrec *cr,
 +                                  gmx_hw_opt_t *hw_opt,
 +                                  const char *nbpu_opt,
 +                                  t_inputrec *ir,
 +                                  const gmx_mtop_t *mtop,
 +                                  matrix box,
 +                                  gmx_bool *bUseGPU)
 +{
 +    /* Here we only check for GPU usage on the MPI master process,
 +     * as here we don't know how many GPUs we will use yet.
 +     * We check for a GPU on all processes later.
 +     */
 +    *bUseGPU = hwinfo->bCanUseGPU || (getenv("GMX_EMULATE_GPU") != NULL);
 +
 +    if (ir->verletbuf_drift > 0)
 +    {
 +        /* Update the Verlet buffer size for the current run setup */
 +        verletbuf_list_setup_t ls;
 +        real rlist_new;
 +
 +        /* Here we assume CPU acceleration is on. But as currently
 +         * calc_verlet_buffer_size gives the same results for 4x8 and 4x4
 +         * and 4x2 gives a larger buffer than 4x4, this is ok.
 +         */
 +        verletbuf_get_list_setup(*bUseGPU,&ls);
 +
 +        calc_verlet_buffer_size(mtop,det(box),ir,
 +                                ir->verletbuf_drift,&ls,
 +                                NULL,&rlist_new);
 +        if (rlist_new != ir->rlist)
 +        {
 +            if (fplog != NULL)
 +            {
 +                fprintf(fplog,"\nChanging rlist from %g to %g for non-bonded %dx%d atom kernels\n\n",
 +                        ir->rlist,rlist_new,
 +                        ls.cluster_size_i,ls.cluster_size_j);
 +            }
 +            ir->rlist     = rlist_new;
 +            ir->rlistlong = rlist_new;
 +        }
 +    }
 +
 +    /* With GPU or emulation we should check nstlist for performance */
 +    if ((EI_DYNAMICS(ir->eI) &&
 +         *bUseGPU &&
 +         ir->nstlist < NSTLIST_GPU_ENOUGH) ||
 +        getenv(NSTLIST_ENVVAR) != NULL)
 +    {
 +        /* Choose a better nstlist */
 +        increase_nstlist(fplog,cr,ir,mtop,box);
 +    }
 +}
 +
 +static void convert_to_verlet_scheme(FILE *fplog,
 +                                     t_inputrec *ir,
 +                                     gmx_mtop_t *mtop,real box_vol)
 +{
 +    char *conv_mesg="Converting input file with group cut-off scheme to the Verlet cut-off scheme";
 +
 +    md_print_warn(NULL,fplog,"%s\n",conv_mesg);
 +
 +    ir->cutoff_scheme   = ecutsVERLET;
 +    ir->verletbuf_drift = 0.005;
 +
 +    if (ir->rcoulomb != ir->rvdw)
 +    {
 +        gmx_fatal(FARGS,"The VdW and Coulomb cut-offs are different, whereas the Verlet scheme only supports equal cut-offs");
 +    }
 +
 +    if (ir->vdwtype == evdwUSER || EEL_USER(ir->coulombtype))
 +    {
 +        gmx_fatal(FARGS,"User non-bonded potentials are not (yet) supported with the Verlet scheme");
 +    }
 +    else if (EVDW_SWITCHED(ir->vdwtype) || EEL_SWITCHED(ir->coulombtype))
 +    {
 +        md_print_warn(NULL,fplog,"Converting switched or shifted interactions to a shifted potential (without force shift), this will lead to slightly different interaction potentials");
 +
 +        if (EVDW_SWITCHED(ir->vdwtype))
 +        {
 +            ir->vdwtype = evdwCUT;
 +        }
 +        if (EEL_SWITCHED(ir->coulombtype))
 +        {
 +            if (EEL_FULL(ir->coulombtype))
 +            {
 +                /* With full electrostatic only PME can be switched */
 +                ir->coulombtype = eelPME;
 +            }
 +            else
 +            {
 +                md_print_warn(NULL,fplog,"NOTE: Replacing %s electrostatics with reaction-field with epsilon-rf=inf\n",eel_names[ir->coulombtype]);
 +                ir->coulombtype = eelRF;
 +                ir->epsilon_rf  = 0.0;
 +            }
 +        }
 +
 +        /* We set the target energy drift to a small number.
 +         * Note that this is only for testing. For production the user
 +         * should think about this and set the mdp options.
 +         */
 +        ir->verletbuf_drift = 1e-4;
 +    }
 +
 +    if (inputrec2nboundeddim(ir) != 3)
 +    {
 +        gmx_fatal(FARGS,"Can only convert old tpr files to the Verlet cut-off scheme with 3D pbc");
 +    }
 +
 +    if (ir->efep != efepNO || ir->implicit_solvent != eisNO)
 +    {
 +        gmx_fatal(FARGS,"Will not convert old tpr files to the Verlet cut-off scheme with free-energy calculations or implicit solvent");
 +    }
 +
 +    if (EI_DYNAMICS(ir->eI) && !(EI_MD(ir->eI) && ir->etc == etcNO))
 +    {
 +        verletbuf_list_setup_t ls;
 +
 +        verletbuf_get_list_setup(FALSE,&ls);
 +        calc_verlet_buffer_size(mtop,box_vol,ir,ir->verletbuf_drift,&ls,
 +                                NULL,&ir->rlist);
 +    }
 +    else
 +    {
 +        ir->verletbuf_drift = -1;
 +        ir->rlist           = 1.05*max(ir->rvdw,ir->rcoulomb);
 +    }
 +
 +    gmx_mtop_remove_chargegroups(mtop);
 +}
 +
-  * Note that this will only work on Linux as we use a GNU feature. */
++/* Check the process affinity mask. If it is non-zero, something
++ * else has set the affinity, and mdrun should honor that and
++ * not attempt to do its own thread pinning.
++ *
++ * This function should be called twice. Once before the OpenMP
++ * library gets initialized with bAfterOpenMPInit=FALSE (which will
++ * detect affinity set by external tools like taskset), and again
++ * later, after the OpenMP initialization, with bAfterOpenMPInit=TRUE
++ * (which will detect affinity changes made by the OpenMP library).
 + *
-                           "Non-default process affinity set, disabling internal affinity");
++ * Note that this will only work on Linux, because we use a GNU
++ * feature. */
 +static void check_cpu_affinity_set(FILE *fplog, const t_commrec *cr,
 +                                   gmx_hw_opt_t *hw_opt, int ncpus,
 +                                   gmx_bool bAfterOpenmpInit)
 +{
 +#ifdef HAVE_SCHED_GETAFFINITY
 +    cpu_set_t mask_current;
 +    int       i, ret, cpu_count, cpu_set;
 +    gmx_bool  bAllSet;
 +
 +    assert(hw_opt);
 +    if (!hw_opt->bThreadPinning)
 +    {
 +        /* internal affinity setting is off, don't bother checking process affinity */
 +        return;
 +    }
 +
 +    CPU_ZERO(&mask_current);
 +    if ((ret = sched_getaffinity(0, sizeof(cpu_set_t), &mask_current)) != 0)
 +    {
 +        /* failed to query affinity mask, will just return */
 +        if (debug)
 +        {
 +            fprintf(debug, "Failed to query affinity mask (error %d)", ret);
 +        }
 +        return;
 +    }
 +
 +    /* Before proceeding with the actual check, make sure that the number of
 +     * detected CPUs is >= the CPUs in the current set.
 +     * We need to check for CPU_COUNT as it was added only in glibc 2.6. */
 +#ifdef CPU_COUNT
 +    if (ncpus < CPU_COUNT(&mask_current))
 +    {
 +        if (debug)
 +        {
 +            fprintf(debug, "%d CPUs detected, but %d was returned by CPU_COUNT",
 +                    ncpus, CPU_COUNT(&mask_current));
 +        }
 +        return;
 +    }
 +#endif /* CPU_COUNT */
 +
 +    bAllSet = TRUE;
 +    for (i = 0; (i < ncpus && i < CPU_SETSIZE); i++)
 +    {
 +        bAllSet = bAllSet && (CPU_ISSET(i, &mask_current) != 0);
 +    }
 +
 +    if (!bAllSet)
 +    {
 +        if (!bAfterOpenmpInit)
 +        {
 +            md_print_warn(cr, fplog,
-                           "Non-default process affinity set probably by the OpenMP library, "
-                           "disabling internal affinity");
++                          "%s detected a non-default process affinity, "
++                          "so it will not attempt to pin its threads", ShortProgram());
 +        }
 +        else
 +        {
 +            md_print_warn(cr, fplog,
-             fprintf(debug, "Non-default affinity mask found\n");
++                          "%s detected a non-default process affinity, "
++                          "probably set by the OpenMP library, "
++                          "so it will not attempt to pin its threads", ShortProgram());
 +        }
 +        hw_opt->bThreadPinning = FALSE;
 +
 +        if (debug)
 +        {
-             /* check if some threads failed to set their affinities */
++            fprintf(debug, "Non-default affinity mask found, mdrun will not pin threads\n");
 +        }
 +    }
 +    else
 +    {
 +        if (debug)
 +        {
 +            fprintf(debug, "Default affinity mask found\n");
 +        }
 +    }
 +#endif /* HAVE_SCHED_GETAFFINITY */
 +}
 +
 +/* Set CPU affinity. Can be important for performance.
 +   On some systems (e.g. Cray) CPU Affinity is set by default.
 +   But default assigning doesn't work (well) with only some ranks
 +   having threads. This causes very low performance.
 +   External tools have cumbersome syntax for setting affinity
 +   in the case that only some ranks have threads.
 +   Thus it is important that GROMACS sets the affinity internally
 +   if only PME is using threads.
 +*/
 +static void set_cpu_affinity(FILE *fplog,
 +                             const t_commrec *cr,
 +                             gmx_hw_opt_t *hw_opt,
 +                             int nthreads_pme,
 +                             const gmx_hw_info_t *hwinfo,
 +                             const t_inputrec *inputrec)
 +{
 +#if defined GMX_THREAD_MPI
 +    /* With the number of TMPI threads equal to the number of cores
 +     * we already pinned in thread-MPI, so don't pin again here.
 +     */
 +    if (hw_opt->nthreads_tmpi == tMPI_Thread_get_hw_number())
 +    {
 +        return;
 +    }
 +#endif
 +
 +#ifndef __APPLE__
 +    /* If the tMPI thread affinity setting is not supported encourage the user
 +     * to report it as it's either a bug or an exotic platform which we might
 +     * want to support. */
 +    if (tMPI_Thread_setaffinity_support() != TMPI_SETAFFINITY_SUPPORT_YES)
 +    {
 +        md_print_warn(NULL, fplog,
 +                      "Can not set thread affinities on the current plarform. On NUMA systems this\n"
 +                      "can cause performance degradation. If you think your platform should support\n"
 +                      "setting affinities, contact the GROMACS developers.");
 +        return;
 +    }
 +#endif /* __APPLE__ */
 +
 +    if (hw_opt->bThreadPinning)
 +    {
 +        int nth_affinity_set, thread_id_node, thread_id,
 +            nthread_local, nthread_node, nthread_hw_max, nphyscore;
 +        int offset;
 +        char *env;
 +
 +        /* threads on this MPI process or TMPI thread */
 +        if (cr->duty & DUTY_PP)
 +        {
 +            nthread_local = gmx_omp_nthreads_get(emntNonbonded);
 +        }
 +        else
 +        {
 +            nthread_local = gmx_omp_nthreads_get(emntPME);
 +        }
 +
 +        /* map the current process to cores */
 +        thread_id_node = 0;
 +        nthread_node = nthread_local;
 +#ifdef GMX_MPI
 +        if (PAR(cr) || MULTISIM(cr))
 +        {
 +            /* We need to determine a scan of the thread counts in this
 +             * compute node.
 +             */
 +            MPI_Comm comm_intra;
 +
 +            MPI_Comm_split(MPI_COMM_WORLD,gmx_hostname_num(),cr->rank_intranode,
 +                           &comm_intra);
 +            MPI_Scan(&nthread_local,&thread_id_node,1,MPI_INT,MPI_SUM,comm_intra);
 +            /* MPI_Scan is inclusive, but here we need exclusive */
 +            thread_id_node -= nthread_local;
 +            /* Get the total number of threads on this physical node */
 +            MPI_Allreduce(&nthread_local,&nthread_node,1,MPI_INT,MPI_SUM,comm_intra);
 +            MPI_Comm_free(&comm_intra);
 +        }
 +#endif
 +
 +        offset = 0;
 +        if (hw_opt->core_pinning_offset > 0)
 +        {
 +            offset = hw_opt->core_pinning_offset;
 +            if (SIMMASTER(cr))
 +            {
 +                fprintf(stderr, "Applying core pinning offset %d\n", offset);
 +            }
 +            if (fplog)
 +            {
 +                fprintf(fplog, "Applying core pinning offset %d\n", offset);
 +            }
 +        }
 +
 +        /* With Intel Hyper-Threading enabled, we want to pin consecutive
 +         * threads to physical cores when using more threads than physical
 +         * cores or when the user requests so.
 +         */
 +        nthread_hw_max = hwinfo->nthreads_hw_avail;
 +        nphyscore = -1;
 +        if (hw_opt->bPinHyperthreading ||
 +            (gmx_cpuid_x86_smt(hwinfo->cpuid_info) == GMX_CPUID_X86_SMT_ENABLED &&
 +             nthread_node > nthread_hw_max/2 && getenv("GMX_DISABLE_PINHT") == NULL))
 +        {
 +            if (gmx_cpuid_x86_smt(hwinfo->cpuid_info) != GMX_CPUID_X86_SMT_ENABLED)
 +            {
 +                /* We print to stderr on all processes, as we might have
 +                 * different settings on different physical nodes.
 +                 */
 +                if (gmx_cpuid_vendor(hwinfo->cpuid_info) != GMX_CPUID_VENDOR_INTEL)
 +                {
 +                    md_print_warn(NULL, fplog, "Pinning for Hyper-Threading layout requested, "
 +                                  "but non-Intel CPU detected (vendor: %s)\n",
 +                                  gmx_cpuid_vendor_string[gmx_cpuid_vendor(hwinfo->cpuid_info)]);
 +                }
 +                else
 +                {
 +                    md_print_warn(NULL, fplog, "Pinning for Hyper-Threading layout requested, "
 +                                  "but the CPU detected does not have Intel Hyper-Threading support "
 +                                  "(or it is turned off)\n");
 +                }
 +            }
 +            nphyscore = nthread_hw_max/2;
 +
 +            if (SIMMASTER(cr))
 +            {
 +                fprintf(stderr, "Pinning to Hyper-Threading cores with %d physical cores in a compute node\n",
 +                        nphyscore);
 +            }
 +            if (fplog)
 +            {
 +                fprintf(fplog, "Pinning to Hyper-Threading cores with %d physical cores in a compute node\n",
 +                        nphyscore);
 +            }
 +        }
 +
 +        /* Set the per-thread affinity. In order to be able to check the success
 +         * of affinity settings, we will set nth_affinity_set to 1 on threads
 +         * where the affinity setting succeeded and to 0 where it failed.
 +         * Reducing these 0/1 values over the threads will give the total number
 +         * of threads on which we succeeded.
 +         */
 +         nth_affinity_set = 0;
 +#pragma omp parallel firstprivate(thread_id_node) num_threads(nthread_local) \
 +                     reduction(+:nth_affinity_set)
 +        {
 +            int      core;
 +            gmx_bool setaffinity_ret;
 +
 +            thread_id       = gmx_omp_get_thread_num();
 +            thread_id_node += thread_id;
 +            if (nphyscore <= 0)
 +            {
 +                core = offset + thread_id_node;
 +            }
 +            else
 +            {
 +                /* Lock pairs of threads to the same hyperthreaded core */
 +                core = offset + thread_id_node/2 + (thread_id_node % 2)*nphyscore;
 +            }
 +
 +            setaffinity_ret = tMPI_Thread_setaffinity_single(tMPI_Thread_self(), core);
 +
 +            /* store the per-thread success-values of the setaffinity */
 +            nth_affinity_set = (setaffinity_ret == 0);
 +
 +            if (debug)
 +            {
 +                fprintf(debug, "On rank %2d, thread %2d, core %2d the affinity setting returned %d\n",
 +                        cr->nodeid, gmx_omp_get_thread_num(), core, setaffinity_ret);
 +            }
 +        }
 +
 +        if (nth_affinity_set > nthread_local)
 +        {
 +            char msg[STRLEN];
 +
 +            sprintf(msg, "Looks like we have set affinity for more threads than "
 +                    "we have (%d > %d)!\n", nth_affinity_set, nthread_local);
 +            gmx_incons(msg);
 +        }
 +        else
 +        {
-                 char sbuf[STRLEN];
-                 sbuf[0] = '\0';
++            /* check & warn if some threads failed to set their affinities */
 +            if (nth_affinity_set != nthread_local)
 +            {
-                 sprintf(sbuf, "In thread-MPI thread #%d", cr->nodeid);
++                char sbuf1[STRLEN], sbuf2[STRLEN];
++
++                /* sbuf1 contains rank info, while sbuf2 OpenMP thread info */
++                sbuf1[0] = sbuf2[0] = '\0';
 +#ifdef GMX_MPI
 +#ifdef GMX_THREAD_MPI
-                 sprintf(sbuf, "In MPI process #%d", cr->nodeid);
++                sprintf(sbuf1, "In thread-MPI thread #%d: ", cr->nodeid);
 +#else /* GMX_LIB_MPI */
++                sprintf(sbuf1, "In MPI process #%d: ", cr->nodeid);
 +#endif
-                               "%s%d/%d thread%s failed to set their affinities. "
-                               "This can cause performance degradation!",
-                               sbuf, nthread_local - nth_affinity_set, nthread_local,
-                               (nthread_local - nth_affinity_set) > 1 ? "s" : "");
 +#endif /* GMX_MPI */
++
++                if (nthread_local > 1)
++                {
++                    sprintf(sbuf2, "of %d/%d thread%s ",
++                            nthread_local - nth_affinity_set, nthread_local,
++                            (nthread_local - nth_affinity_set) > 1 ? "s" : "");
++                }
++
 +                md_print_warn(NULL, fplog,
-     /* remove when vv and rerun works correctly! */
-     if (PAR(cr) && EI_VV(inputrec->eI) && ((Flags & MD_RERUN) || (Flags & MD_RERUN_VSITE)))
-     {
-         gmx_fatal(FARGS,
-                   "Currently can't do velocity verlet with rerun in parallel.");
-     }
++                              "NOTE: %sAffinity setting %sfailed.\n"
++                              "      This can cause performance degradation!",
++                              sbuf1, sbuf2);
 +            }
 +        }
 +    }
 +}
 +
 +
 +static void check_and_update_hw_opt(gmx_hw_opt_t *hw_opt,
 +                                    int cutoff_scheme)
 +{
 +    gmx_omp_nthreads_read_env(&hw_opt->nthreads_omp);
 +
 +#ifndef GMX_THREAD_MPI
 +    if (hw_opt->nthreads_tot > 0)
 +    {
 +        gmx_fatal(FARGS,"Setting the total number of threads is only supported with thread-MPI and Gromacs was compiled without thread-MPI");
 +    }
 +    if (hw_opt->nthreads_tmpi > 0)
 +    {
 +        gmx_fatal(FARGS,"Setting the number of thread-MPI threads is only supported with thread-MPI and Gromacs was compiled without thread-MPI");
 +    }
 +#endif
 +
 +    if (hw_opt->nthreads_tot > 0 && hw_opt->nthreads_omp_pme <= 0)
 +    {
 +        /* We have the same number of OpenMP threads for PP and PME processes,
 +         * thus we can perform several consistency checks.
 +         */
 +        if (hw_opt->nthreads_tmpi > 0 &&
 +            hw_opt->nthreads_omp > 0 &&
 +            hw_opt->nthreads_tot != hw_opt->nthreads_tmpi*hw_opt->nthreads_omp)
 +        {
 +            gmx_fatal(FARGS,"The total number of threads requested (%d) does not match the thread-MPI threads (%d) times the OpenMP threads (%d) requested",
 +                      hw_opt->nthreads_tot,hw_opt->nthreads_tmpi,hw_opt->nthreads_omp);
 +        }
 +
 +        if (hw_opt->nthreads_tmpi > 0 &&
 +            hw_opt->nthreads_tot % hw_opt->nthreads_tmpi != 0)
 +        {
 +            gmx_fatal(FARGS,"The total number of threads requested (%d) is not divisible by the number of thread-MPI threads requested (%d)",
 +                      hw_opt->nthreads_tot,hw_opt->nthreads_tmpi);
 +        }
 +
 +        if (hw_opt->nthreads_omp > 0 &&
 +            hw_opt->nthreads_tot % hw_opt->nthreads_omp != 0)
 +        {
 +            gmx_fatal(FARGS,"The total number of threads requested (%d) is not divisible by the number of OpenMP threads requested (%d)",
 +                      hw_opt->nthreads_tot,hw_opt->nthreads_omp);
 +        }
 +
 +        if (hw_opt->nthreads_tmpi > 0 &&
 +            hw_opt->nthreads_omp <= 0)
 +        {
 +            hw_opt->nthreads_omp = hw_opt->nthreads_tot/hw_opt->nthreads_tmpi;
 +        }
 +    }
 +
 +#ifndef GMX_OPENMP
 +    if (hw_opt->nthreads_omp > 1)
 +    {
 +        gmx_fatal(FARGS,"OpenMP threads are requested, but Gromacs was compiled without OpenMP support");
 +    }
 +#endif
 +
 +    if (cutoff_scheme == ecutsGROUP)
 +    {
 +        /* We only have OpenMP support for PME only nodes */
 +        if (hw_opt->nthreads_omp > 1)
 +        {
 +            gmx_fatal(FARGS,"OpenMP threads have been requested with cut-off scheme %s, but these are only supported with cut-off scheme %s",
 +                      ecutscheme_names[cutoff_scheme],
 +                      ecutscheme_names[ecutsVERLET]);
 +        }
 +        hw_opt->nthreads_omp = 1;
 +    }
 +
 +    if (hw_opt->nthreads_omp_pme > 0 && hw_opt->nthreads_omp <= 0)
 +    {
 +        gmx_fatal(FARGS,"You need to specify -ntomp in addition to -ntomp_pme");
 +    }
 +
 +    if (hw_opt->nthreads_tot == 1)
 +    {
 +        hw_opt->nthreads_tmpi = 1;
 +
 +        if (hw_opt->nthreads_omp > 1)
 +        {
 +            gmx_fatal(FARGS,"You requested %d OpenMP threads with %d total threads",
 +                      hw_opt->nthreads_tmpi,hw_opt->nthreads_tot);
 +        }
 +        hw_opt->nthreads_omp = 1;
 +    }
 +
 +    if (hw_opt->nthreads_omp_pme <= 0 && hw_opt->nthreads_omp > 0)
 +    {
 +        hw_opt->nthreads_omp_pme = hw_opt->nthreads_omp;
 +    }
 +
 +    if (debug)
 +    {
 +        fprintf(debug,"hw_opt: nt %d ntmpi %d ntomp %d ntomp_pme %d gpu_id '%s'\n",
 +                hw_opt->nthreads_tot,
 +                hw_opt->nthreads_tmpi,
 +                hw_opt->nthreads_omp,
 +                hw_opt->nthreads_omp_pme,
 +                hw_opt->gpu_id!=NULL ? hw_opt->gpu_id : "");
 +                
 +    }
 +}
 +
 +
 +/* Override the value in inputrec with value passed on the command line (if any) */
 +static void override_nsteps_cmdline(FILE *fplog,
 +                                    int nsteps_cmdline,
 +                                    t_inputrec *ir,
 +                                    const t_commrec *cr)
 +{
 +    assert(ir);
 +    assert(cr);
 +
 +    /* override with anything else than the default -2 */
 +    if (nsteps_cmdline > -2)
 +    {
 +        char stmp[STRLEN];
 +
 +        ir->nsteps = nsteps_cmdline;
 +        if (EI_DYNAMICS(ir->eI))
 +        {
 +            sprintf(stmp, "Overriding nsteps with value passed on the command line: %d steps, %.3f ps",
 +                    nsteps_cmdline, nsteps_cmdline*ir->delta_t);
 +        }
 +        else
 +        {
 +            sprintf(stmp, "Overriding nsteps with value passed on the command line: %d steps",
 +                    nsteps_cmdline);
 +        }
 +
 +        md_print_warn(cr, fplog, "%s\n", stmp);
 +    }
 +}
 +
 +/* Data structure set by SIMMASTER which needs to be passed to all nodes
 + * before the other nodes have read the tpx file and called gmx_detect_hardware.
 + */
 +typedef struct {
 +    int cutoff_scheme; /* The cutoff scheme from inputrec_t */
 +    gmx_bool bUseGPU;       /* Use GPU or GPU emulation          */
 +} master_inf_t;
 +
 +int mdrunner(gmx_hw_opt_t *hw_opt,
 +             FILE *fplog,t_commrec *cr,int nfile,
 +             const t_filenm fnm[], const output_env_t oenv, gmx_bool bVerbose,
 +             gmx_bool bCompact, int nstglobalcomm,
 +             ivec ddxyz,int dd_node_order,real rdd,real rconstr,
 +             const char *dddlb_opt,real dlb_scale,
 +             const char *ddcsx,const char *ddcsy,const char *ddcsz,
 +             const char *nbpu_opt,
 +             int nsteps_cmdline, int nstepout,int resetstep,
 +             int nmultisim,int repl_ex_nst,int repl_ex_nex,
 +             int repl_ex_seed, real pforce,real cpt_period,real max_hours,
 +             const char *deviceOptions, unsigned long Flags)
 +{
 +    gmx_bool   bForceUseGPU,bTryUseGPU;
 +    double     nodetime=0,realtime;
 +    t_inputrec *inputrec;
 +    t_state    *state=NULL;
 +    matrix     box;
 +    gmx_ddbox_t ddbox={0};
 +    int        npme_major,npme_minor;
 +    real       tmpr1,tmpr2;
 +    t_nrnb     *nrnb;
 +    gmx_mtop_t *mtop=NULL;
 +    t_mdatoms  *mdatoms=NULL;
 +    t_forcerec *fr=NULL;
 +    t_fcdata   *fcd=NULL;
 +    real       ewaldcoeff=0;
 +    gmx_pme_t  *pmedata=NULL;
 +    gmx_vsite_t *vsite=NULL;
 +    gmx_constr_t constr;
 +    int        i,m,nChargePerturbed=-1,status,nalloc;
 +    char       *gro;
 +    gmx_wallcycle_t wcycle;
 +    gmx_bool       bReadRNG,bReadEkin;
 +    int        list;
 +    gmx_runtime_t runtime;
 +    int        rc;
 +    gmx_large_int_t reset_counters;
 +    gmx_edsam_t ed=NULL;
 +    t_commrec   *cr_old=cr; 
 +    int         nthreads_pme=1;
 +    int         nthreads_pp=1;
 +    gmx_membed_t membed=NULL;
 +    gmx_hw_info_t *hwinfo=NULL;
 +    master_inf_t minf={-1,FALSE};
 +
 +    /* CAUTION: threads may be started later on in this function, so
 +       cr doesn't reflect the final parallel state right now */
 +    snew(inputrec,1);
 +    snew(mtop,1);
 +    
 +    if (Flags & MD_APPENDFILES) 
 +    {
 +        fplog = NULL;
 +    }
 +
 +    bForceUseGPU = (strncmp(nbpu_opt, "gpu", 3) == 0);
 +    bTryUseGPU   = (strncmp(nbpu_opt, "auto", 4) == 0) || bForceUseGPU;
 +
 +    snew(state,1);
 +    if (SIMMASTER(cr)) 
 +    {
 +        /* Read (nearly) all data required for the simulation */
 +        read_tpx_state(ftp2fn(efTPX,nfile,fnm),inputrec,state,NULL,mtop);
 +
 +        if (inputrec->cutoff_scheme != ecutsVERLET &&
 +            ((Flags & MD_TESTVERLET) || getenv("GMX_VERLET_SCHEME") != NULL))
 +        {
 +            convert_to_verlet_scheme(fplog,inputrec,mtop,det(state->box));
 +        }
 +
 +        /* Detect hardware, gather information. With tMPI only thread 0 does it
 +         * and after threads are started broadcasts hwinfo around. */
 +        snew(hwinfo, 1);
 +        gmx_detect_hardware(fplog, hwinfo, cr,
 +                            bForceUseGPU, bTryUseGPU, hw_opt->gpu_id);
 +
 +        minf.cutoff_scheme = inputrec->cutoff_scheme;
 +        minf.bUseGPU       = FALSE;
 +
 +        if (inputrec->cutoff_scheme == ecutsVERLET)
 +        {
 +            prepare_verlet_scheme(fplog,hwinfo,cr,hw_opt,nbpu_opt,
 +                                  inputrec,mtop,state->box,
 +                                  &minf.bUseGPU);
 +        }
 +        else if (hwinfo->bCanUseGPU)
 +        {
 +            md_print_warn(cr,fplog,
 +                          "NOTE: GPU(s) found, but the current simulation can not use GPUs\n"
 +                          "      To use a GPU, set the mdp option: cutoff-scheme = Verlet\n"
 +                          "      (for quick performance testing you can use the -testverlet option)\n");
 +
 +            if (bForceUseGPU)
 +            {
 +                gmx_fatal(FARGS,"GPU requested, but can't be used without cutoff-scheme=Verlet");
 +            }
 +        }
 +    }
 +#ifndef GMX_THREAD_MPI
 +    if (PAR(cr))
 +    {
 +        gmx_bcast_sim(sizeof(minf),&minf,cr);
 +    }
 +#endif
 +    if (minf.bUseGPU && cr->npmenodes == -1)
 +    {
 +        /* Don't automatically use PME-only nodes with GPUs */
 +        cr->npmenodes = 0;
 +    }
 +
 +    /* Check for externally set OpenMP affinity and turn off internal
 +     * pinning if any is found. We need to do this check early to tell
 +     * thread-MPI whether it should do pinning when spawning threads.
 +     */
 +    gmx_omp_check_thread_affinity(fplog, cr, hw_opt);
 +
 +#ifdef GMX_THREAD_MPI
 +    /* With thread-MPI inputrec is only set here on the master thread */
 +    if (SIMMASTER(cr))
 +#endif
 +    {
 +        check_and_update_hw_opt(hw_opt,minf.cutoff_scheme);
 +
 +#ifdef GMX_THREAD_MPI
 +        /* Early check for externally set process affinity. Can't do over all
 +         * MPI processes because hwinfo is not available everywhere, but with
 +         * thread-MPI it's needed as pinning might get turned off which needs
 +         * to be known before starting thread-MPI. */
 +        check_cpu_affinity_set(fplog,
 +                               NULL,
 +                               hw_opt, hwinfo->nthreads_hw_avail, FALSE);
 +#endif
 +
 +#ifdef GMX_THREAD_MPI
 +        if (cr->npmenodes > 0 && hw_opt->nthreads_tmpi <= 0)
 +        {
 +            gmx_fatal(FARGS,"You need to explicitly specify the number of MPI threads (-ntmpi) when using separate PME nodes");
 +        }
 +#endif
 +
 +        if (hw_opt->nthreads_omp_pme != hw_opt->nthreads_omp &&
 +            cr->npmenodes <= 0)
 +        {
 +            gmx_fatal(FARGS,"You need to explicitly specify the number of PME nodes (-npme) when using different number of OpenMP threads for PP and PME nodes");
 +        }
 +    }
 +
 +#ifdef GMX_THREAD_MPI
 +    if (SIMMASTER(cr))
 +    {
 +        /* NOW the threads will be started: */
 +        hw_opt->nthreads_tmpi = get_nthreads_mpi(hwinfo,
 +                                                 hw_opt,
 +                                                 inputrec, mtop,
 +                                                 cr, fplog);
 +        if (hw_opt->nthreads_tot > 0 && hw_opt->nthreads_omp <= 0)
 +        {
 +            hw_opt->nthreads_omp = hw_opt->nthreads_tot/hw_opt->nthreads_tmpi;
 +        }
 +
 +        if (hw_opt->nthreads_tmpi > 1)
 +        {
 +            /* now start the threads. */
 +            cr=mdrunner_start_threads(hw_opt, fplog, cr_old, nfile, fnm, 
 +                                      oenv, bVerbose, bCompact, nstglobalcomm, 
 +                                      ddxyz, dd_node_order, rdd, rconstr, 
 +                                      dddlb_opt, dlb_scale, ddcsx, ddcsy, ddcsz,
 +                                      nbpu_opt,
 +                                      nsteps_cmdline, nstepout, resetstep, nmultisim, 
 +                                      repl_ex_nst, repl_ex_nex, repl_ex_seed, pforce,
 +                                      cpt_period, max_hours, deviceOptions, 
 +                                      Flags);
 +            /* the main thread continues here with a new cr. We don't deallocate
 +               the old cr because other threads may still be reading it. */
 +            if (cr == NULL)
 +            {
 +                gmx_comm("Failed to spawn threads");
 +            }
 +        }
 +    }
 +#endif
 +    /* END OF CAUTION: cr is now reliable */
 +
 +    /* g_membed initialisation *
 +     * Because we change the mtop, init_membed is called before the init_parallel *
 +     * (in case we ever want to make it run in parallel) */
 +    if (opt2bSet("-membed",nfile,fnm))
 +    {
 +        if (MASTER(cr))
 +        {
 +            fprintf(stderr,"Initializing membed");
 +        }
 +        membed = init_membed(fplog,nfile,fnm,mtop,inputrec,state,cr,&cpt_period);
 +    }
 +
 +    if (PAR(cr))
 +    {
 +        /* now broadcast everything to the non-master nodes/threads: */
 +        init_parallel(fplog, cr, inputrec, mtop);
 +
 +        /* This check needs to happen after get_nthreads_mpi() */
 +        if (inputrec->cutoff_scheme == ecutsVERLET && (Flags & MD_PARTDEC))
 +        {
 +            gmx_fatal_collective(FARGS,cr,NULL,
 +                                 "The Verlet cut-off scheme is not supported with particle decomposition.\n"
 +                                 "You can achieve the same effect as particle decomposition by running in parallel using only OpenMP threads.");
 +        }
 +    }
 +    if (fplog != NULL)
 +    {
 +        pr_inputrec(fplog,0,"Input Parameters",inputrec,FALSE);
 +    }
 +
 +#if defined GMX_THREAD_MPI
 +    /* With tMPI we detected on thread 0 and we'll just pass the hwinfo pointer
 +     * to the other threads  -- slightly uncool, but works fine, just need to
 +     * make sure that the data doesn't get freed twice. */
 +    if (cr->nnodes > 1)
 +    {
 +        if (!SIMMASTER(cr))
 +        {
 +            snew(hwinfo, 1);
 +        }
 +        gmx_bcast(sizeof(&hwinfo), &hwinfo, cr);
 +    }
 +#else
 +    if (PAR(cr) && !SIMMASTER(cr))
 +    {
 +        /* now we have inputrec on all nodes, can run the detection */
 +        /* TODO: perhaps it's better to propagate within a node instead? */
 +        snew(hwinfo, 1);
 +        gmx_detect_hardware(fplog, hwinfo, cr,
 +                                 bForceUseGPU, bTryUseGPU, hw_opt->gpu_id);
 +    }
 +
 +    /* Now do the affinity check with MPI/no-MPI (done earlier with thread-MPI). */
 +    check_cpu_affinity_set(fplog, cr,
 +                           hw_opt, hwinfo->nthreads_hw_avail, FALSE);
 +#endif
 +
 +    /* now make sure the state is initialized and propagated */
 +    set_state_entries(state,inputrec,cr->nnodes);
 +
-         ed = ed_open(nfile,fnm,Flags,cr);
 +    /* A parallel command line option consistency check that we can
 +       only do after any threads have started. */
 +    if (!PAR(cr) &&
 +        (ddxyz[XX] > 1 || ddxyz[YY] > 1 || ddxyz[ZZ] > 1 || cr->npmenodes > 0))
 +    {
 +        gmx_fatal(FARGS,
 +                  "The -dd or -npme option request a parallel simulation, "
 +#ifndef GMX_MPI
 +                  "but %s was compiled without threads or MPI enabled"
 +#else
 +#ifdef GMX_THREAD_MPI
 +                  "but the number of threads (option -nt) is 1"
 +#else
 +                  "but %s was not started through mpirun/mpiexec or only one process was requested through mpirun/mpiexec"
 +#endif
 +#endif
 +                  , ShortProgram()
 +            );
 +    }
 +
 +    if ((Flags & MD_RERUN) &&
 +        (EI_ENERGY_MINIMIZATION(inputrec->eI) || eiNM == inputrec->eI))
 +    {
 +        gmx_fatal(FARGS, "The .mdp file specified an energy mininization or normal mode algorithm, and these are not compatible with mdrun -rerun");
 +    }
 +
 +    if (can_use_allvsall(inputrec,mtop,TRUE,cr,fplog) && PAR(cr))
 +    {
 +        /* All-vs-all loops do not work with domain decomposition */
 +        Flags |= MD_PARTDEC;
 +    }
 +
 +    if (!EEL_PME(inputrec->coulombtype) || (Flags & MD_PARTDEC))
 +    {
 +        if (cr->npmenodes > 0)
 +        {
 +            if (!EEL_PME(inputrec->coulombtype))
 +            {
 +                gmx_fatal_collective(FARGS,cr,NULL,
 +                                     "PME nodes are requested, but the system does not use PME electrostatics");
 +            }
 +            if (Flags & MD_PARTDEC)
 +            {
 +                gmx_fatal_collective(FARGS,cr,NULL,
 +                                     "PME nodes are requested, but particle decomposition does not support separate PME nodes");
 +            }
 +        }
 +
 +        cr->npmenodes = 0;
 +    }
 +
 +#ifdef GMX_FAHCORE
 +    fcRegisterSteps(inputrec->nsteps,inputrec->init_step);
 +#endif
 +
 +    /* NMR restraints must be initialized before load_checkpoint,
 +     * since with time averaging the history is added to t_state.
 +     * For proper consistency check we therefore need to extend
 +     * t_state here.
 +     * So the PME-only nodes (if present) will also initialize
 +     * the distance restraints.
 +     */
 +    snew(fcd,1);
 +
 +    /* This needs to be called before read_checkpoint to extend the state */
 +    init_disres(fplog,mtop,inputrec,cr,Flags & MD_PARTDEC,fcd,state);
 +
 +    if (gmx_mtop_ftype_count(mtop,F_ORIRES) > 0)
 +    {
 +        if (PAR(cr) && !(Flags & MD_PARTDEC))
 +        {
 +            gmx_fatal(FARGS,"Orientation restraints do not work (yet) with domain decomposition, use particle decomposition (mdrun option -pd)");
 +        }
 +        /* Orientation restraints */
 +        if (MASTER(cr))
 +        {
 +            init_orires(fplog,mtop,state->x,inputrec,cr->ms,&(fcd->orires),
 +                        state);
 +        }
 +    }
 +
 +    if (DEFORM(*inputrec))
 +    {
 +        /* Store the deform reference box before reading the checkpoint */
 +        if (SIMMASTER(cr))
 +        {
 +            copy_mat(state->box,box);
 +        }
 +        if (PAR(cr))
 +        {
 +            gmx_bcast(sizeof(box),box,cr);
 +        }
 +        /* Because we do not have the update struct available yet
 +         * in which the reference values should be stored,
 +         * we store them temporarily in static variables.
 +         * This should be thread safe, since they are only written once
 +         * and with identical values.
 +         */
 +#ifdef GMX_THREAD_MPI
 +        tMPI_Thread_mutex_lock(&deform_init_box_mutex);
 +#endif
 +        deform_init_init_step_tpx = inputrec->init_step;
 +        copy_mat(box,deform_init_box_tpx);
 +#ifdef GMX_THREAD_MPI
 +        tMPI_Thread_mutex_unlock(&deform_init_box_mutex);
 +#endif
 +    }
 +
 +    if (opt2bSet("-cpi",nfile,fnm)) 
 +    {
 +        /* Check if checkpoint file exists before doing continuation.
 +         * This way we can use identical input options for the first and subsequent runs...
 +         */
 +        if( gmx_fexist_master(opt2fn_master("-cpi",nfile,fnm,cr),cr) )
 +        {
 +            load_checkpoint(opt2fn_master("-cpi",nfile,fnm,cr),&fplog,
 +                            cr,Flags & MD_PARTDEC,ddxyz,
 +                            inputrec,state,&bReadRNG,&bReadEkin,
 +                            (Flags & MD_APPENDFILES),
 +                            (Flags & MD_APPENDFILESSET));
 +            
 +            if (bReadRNG)
 +            {
 +                Flags |= MD_READ_RNG;
 +            }
 +            if (bReadEkin)
 +            {
 +                Flags |= MD_READ_EKIN;
 +            }
 +        }
 +    }
 +
 +    if (((MASTER(cr) || (Flags & MD_SEPPOT)) && (Flags & MD_APPENDFILES))
 +#ifdef GMX_THREAD_MPI
 +        /* With thread MPI only the master node/thread exists in mdrun.c,
 +         * therefore non-master nodes need to open the "seppot" log file here.
 +         */
 +        || (!MASTER(cr) && (Flags & MD_SEPPOT))
 +#endif
 +        )
 +    {
 +        gmx_log_open(ftp2fn(efLOG,nfile,fnm),cr,!(Flags & MD_SEPPOT),
 +                             Flags,&fplog);
 +    }
 +
 +    /* override nsteps with value from cmdline */
 +    override_nsteps_cmdline(fplog, nsteps_cmdline, inputrec, cr);
 +
 +    if (SIMMASTER(cr)) 
 +    {
 +        copy_mat(state->box,box);
 +    }
 +
 +    if (PAR(cr)) 
 +    {
 +        gmx_bcast(sizeof(box),box,cr);
 +    }
 +
 +    /* Essential dynamics */
 +    if (opt2bSet("-ei",nfile,fnm))
 +    {
 +        /* Open input and output files, allocate space for ED data structure */
++        ed = ed_open(mtop->natoms,&state->edsamstate,nfile,fnm,Flags,oenv,cr);
 +    }
 +
 +    if (PAR(cr) && !((Flags & MD_PARTDEC) ||
 +                     EI_TPI(inputrec->eI) ||
 +                     inputrec->eI == eiNM))
 +    {
 +        cr->dd = init_domain_decomposition(fplog,cr,Flags,ddxyz,rdd,rconstr,
 +                                           dddlb_opt,dlb_scale,
 +                                           ddcsx,ddcsy,ddcsz,
 +                                           mtop,inputrec,
 +                                           box,state->x,
 +                                           &ddbox,&npme_major,&npme_minor);
 +
 +        make_dd_communicators(fplog,cr,dd_node_order);
 +
 +        /* Set overallocation to avoid frequent reallocation of arrays */
 +        set_over_alloc_dd(TRUE);
 +    }
 +    else
 +    {
 +        /* PME, if used, is done on all nodes with 1D decomposition */
 +        cr->npmenodes = 0;
 +        cr->duty = (DUTY_PP | DUTY_PME);
 +        npme_major = 1;
 +        npme_minor = 1;
 +        if (!EI_TPI(inputrec->eI))
 +        {
 +            npme_major = cr->nnodes;
 +        }
 +        
 +        if (inputrec->ePBC == epbcSCREW)
 +        {
 +            gmx_fatal(FARGS,
 +                      "pbc=%s is only implemented with domain decomposition",
 +                      epbc_names[inputrec->ePBC]);
 +        }
 +    }
 +
 +    if (PAR(cr))
 +    {
 +        /* After possible communicator splitting in make_dd_communicators.
 +         * we can set up the intra/inter node communication.
 +         */
 +        gmx_setup_nodecomm(fplog,cr);
 +    }
 +
 +    /* Initialize per-physical-node MPI process/thread ID and counters. */
 +    gmx_init_intranode_counters(cr);
 +
 +#ifdef GMX_MPI
 +    md_print_info(cr,fplog,"Using %d MPI %s\n",
 +                  cr->nnodes,
 +#ifdef GMX_THREAD_MPI
 +                  cr->nnodes==1 ? "thread" : "threads"
 +#else
 +                  cr->nnodes==1 ? "process" : "processes"
 +#endif
 +                  );
++    fflush(stderr);
 +#endif
 +
 +    gmx_omp_nthreads_init(fplog, cr,
 +                          hwinfo->nthreads_hw_avail,
 +                          hw_opt->nthreads_omp,
 +                          hw_opt->nthreads_omp_pme,
 +                          (cr->duty & DUTY_PP) == 0,
 +                          inputrec->cutoff_scheme == ecutsVERLET);
 +
 +    gmx_check_hw_runconf_consistency(fplog, hwinfo, cr, hw_opt->nthreads_tmpi, minf.bUseGPU);
 +
 +    /* getting number of PP/PME threads
 +       PME: env variable should be read only on one node to make sure it is 
 +       identical everywhere;
 +     */
 +    /* TODO nthreads_pp is only used for pinning threads.
 +     * This is a temporary solution until we have a hw topology library.
 +     */
 +    nthreads_pp  = gmx_omp_nthreads_get(emntNonbonded);
 +    nthreads_pme = gmx_omp_nthreads_get(emntPME);
 +
 +    wcycle = wallcycle_init(fplog,resetstep,cr,nthreads_pp,nthreads_pme);
 +
 +    if (PAR(cr))
 +    {
 +        /* Master synchronizes its value of reset_counters with all nodes 
 +         * including PME only nodes */
 +        reset_counters = wcycle_get_reset_counters(wcycle);
 +        gmx_bcast_sim(sizeof(reset_counters),&reset_counters,cr);
 +        wcycle_set_reset_counters(wcycle, reset_counters);
 +    }
 +
 +    snew(nrnb,1);
 +    if (cr->duty & DUTY_PP)
 +    {
 +        /* For domain decomposition we allocate dynamically
 +         * in dd_partition_system.
 +         */
 +        if (DOMAINDECOMP(cr))
 +        {
 +            bcast_state_setup(cr,state);
 +        }
 +        else
 +        {
 +            if (PAR(cr))
 +            {
 +                bcast_state(cr,state,TRUE);
 +            }
 +        }
 +
 +        /* Initiate forcerecord */
 +        fr = mk_forcerec();
 +        fr->hwinfo = hwinfo;
 +        init_forcerec(fplog,oenv,fr,fcd,inputrec,mtop,cr,box,FALSE,
 +                      opt2fn("-table",nfile,fnm),
 +                      opt2fn("-tabletf",nfile,fnm),
 +                      opt2fn("-tablep",nfile,fnm),
 +                      opt2fn("-tableb",nfile,fnm),
 +                      nbpu_opt,
 +                      FALSE,pforce);
 +
 +        /* version for PCA_NOT_READ_NODE (see md.c) */
 +        /*init_forcerec(fplog,fr,fcd,inputrec,mtop,cr,box,FALSE,
 +          "nofile","nofile","nofile","nofile",FALSE,pforce);
 +          */        
 +        fr->bSepDVDL = ((Flags & MD_SEPPOT) == MD_SEPPOT);
 +
 +        /* Initialize QM-MM */
 +        if(fr->bQMMM)
 +        {
 +            init_QMMMrec(cr,box,mtop,inputrec,fr);
 +        }
 +
 +        /* Initialize the mdatoms structure.
 +         * mdatoms is not filled with atom data,
 +         * as this can not be done now with domain decomposition.
 +         */
 +        mdatoms = init_mdatoms(fplog,mtop,inputrec->efep!=efepNO);
 +
 +        /* Initialize the virtual site communication */
 +        vsite = init_vsite(mtop,cr,FALSE);
 +
 +        calc_shifts(box,fr->shift_vec);
 +
 +        /* With periodic molecules the charge groups should be whole at start up
 +         * and the virtual sites should not be far from their proper positions.
 +         */
 +        if (!inputrec->bContinuation && MASTER(cr) &&
 +            !(inputrec->ePBC != epbcNONE && inputrec->bPeriodicMols))
 +        {
 +            /* Make molecules whole at start of run */
 +            if (fr->ePBC != epbcNONE)
 +            {
 +                do_pbc_first_mtop(fplog,inputrec->ePBC,box,mtop,state->x);
 +            }
 +            if (vsite)
 +            {
 +                /* Correct initial vsite positions are required
 +                 * for the initial distribution in the domain decomposition
 +                 * and for the initial shell prediction.
 +                 */
 +                construct_vsites_mtop(fplog,vsite,mtop,state->x);
 +            }
 +        }
 +
 +        if (EEL_PME(fr->eeltype))
 +        {
 +            ewaldcoeff = fr->ewaldcoeff;
 +            pmedata = &fr->pmedata;
 +        }
 +        else
 +        {
 +            pmedata = NULL;
 +        }
 +    }
 +    else
 +    {
 +        /* This is a PME only node */
 +
 +        /* We don't need the state */
 +        done_state(state);
 +
 +        ewaldcoeff = calc_ewaldcoeff(inputrec->rcoulomb, inputrec->ewald_rtol);
 +        snew(pmedata,1);
 +    }
 +
 +    /* Before setting affinity, check whether the affinity has changed
 +     * - which indicates that probably the OpenMP library has changed it since
 +     * we first checked). */
 +    check_cpu_affinity_set(fplog, cr, hw_opt, hwinfo->nthreads_hw_avail, TRUE);
 +
 +    /* Set the CPU affinity */
 +    set_cpu_affinity(fplog,cr,hw_opt,nthreads_pme,hwinfo,inputrec);
 +
 +    /* Initiate PME if necessary,
 +     * either on all nodes or on dedicated PME nodes only. */
 +    if (EEL_PME(inputrec->coulombtype))
 +    {
 +        if (mdatoms)
 +        {
 +            nChargePerturbed = mdatoms->nChargePerturbed;
 +        }
 +        if (cr->npmenodes > 0)
 +        {
 +            /* The PME only nodes need to know nChargePerturbed */
 +            gmx_bcast_sim(sizeof(nChargePerturbed),&nChargePerturbed,cr);
 +        }
 +
 +        if (cr->duty & DUTY_PME)
 +        {
 +            status = gmx_pme_init(pmedata,cr,npme_major,npme_minor,inputrec,
 +                                  mtop ? mtop->natoms : 0,nChargePerturbed,
 +                                  (Flags & MD_REPRODUCIBLE),nthreads_pme);
 +            if (status != 0) 
 +            {
 +                gmx_fatal(FARGS,"Error %d initializing PME",status);
 +            }
 +        }
 +    }
 +
 +
 +    if (integrator[inputrec->eI].func == do_md
 +#ifdef GMX_OPENMM
 +        ||
 +        integrator[inputrec->eI].func == do_md_openmm
 +#endif
 +        )
 +    {
 +        /* Turn on signal handling on all nodes */
 +        /*
 +         * (A user signal from the PME nodes (if any)
 +         * is communicated to the PP nodes.
 +         */
 +        signal_handler_install();
 +    }
 +
 +    if (cr->duty & DUTY_PP)
 +    {
 +        if (inputrec->ePull != epullNO)
 +        {
 +            /* Initialize pull code */
 +            init_pull(fplog,inputrec,nfile,fnm,mtop,cr,oenv, inputrec->fepvals->init_lambda,
 +                      EI_DYNAMICS(inputrec->eI) && MASTER(cr),Flags);
 +        }
 +        
 +        if (inputrec->bRot)
 +        {
 +           /* Initialize enforced rotation code */
 +           init_rot(fplog,inputrec,nfile,fnm,cr,state->x,box,mtop,oenv,
 +                    bVerbose,Flags);
 +        }
 +
 +        constr = init_constraints(fplog,mtop,inputrec,ed,state,cr);
 +
 +        if (DOMAINDECOMP(cr))
 +        {
 +            dd_init_bondeds(fplog,cr->dd,mtop,vsite,constr,inputrec,
 +                            Flags & MD_DDBONDCHECK,fr->cginfo_mb);
 +
 +            set_dd_parameters(fplog,cr->dd,dlb_scale,inputrec,fr,&ddbox);
 +
 +            setup_dd_grid(fplog,cr->dd);
 +        }
 +
 +        /* Now do whatever the user wants us to do (how flexible...) */
 +        integrator[inputrec->eI].func(fplog,cr,nfile,fnm,
 +                                      oenv,bVerbose,bCompact,
 +                                      nstglobalcomm,
 +                                      vsite,constr,
 +                                      nstepout,inputrec,mtop,
 +                                      fcd,state,
 +                                      mdatoms,nrnb,wcycle,ed,fr,
 +                                      repl_ex_nst,repl_ex_nex,repl_ex_seed,
 +                                      membed,
 +                                      cpt_period,max_hours,
 +                                      deviceOptions,
 +                                      Flags,
 +                                      &runtime);
 +
 +        if (inputrec->ePull != epullNO)
 +        {
 +            finish_pull(fplog,inputrec->pull);
 +        }
 +        
 +        if (inputrec->bRot)
 +        {
 +            finish_rot(fplog,inputrec->rot);
 +        }
 +
 +    } 
 +    else 
 +    {
 +        /* do PME only */
 +        gmx_pmeonly(*pmedata,cr,nrnb,wcycle,ewaldcoeff,FALSE,inputrec);
 +    }
 +
 +    if (EI_DYNAMICS(inputrec->eI) || EI_TPI(inputrec->eI))
 +    {
 +        /* Some timing stats */  
 +        if (SIMMASTER(cr))
 +        {
 +            if (runtime.proc == 0)
 +            {
 +                runtime.proc = runtime.real;
 +            }
 +        }
 +        else
 +        {
 +            runtime.real = 0;
 +        }
 +    }
 +
 +    wallcycle_stop(wcycle,ewcRUN);
 +
 +    /* Finish up, write some stuff
 +     * if rerunMD, don't write last frame again 
 +     */
 +    finish_run(fplog,cr,ftp2fn(efSTO,nfile,fnm),
 +               inputrec,nrnb,wcycle,&runtime,
 +               fr != NULL && fr->nbv != NULL && fr->nbv->bUseGPU ?
 +                 nbnxn_cuda_get_timings(fr->nbv->cu_nbv) : NULL,
 +               nthreads_pp, 
 +               EI_DYNAMICS(inputrec->eI) && !MULTISIM(cr));
 +
 +    if ((cr->duty & DUTY_PP) && fr->nbv != NULL && fr->nbv->bUseGPU)
 +    {
 +        char gpu_err_str[STRLEN];
 +
 +        /* free GPU memory and uninitialize GPU (by destroying the context) */
 +        nbnxn_cuda_free(fplog, fr->nbv->cu_nbv);
 +
 +        if (!free_gpu(gpu_err_str))
 +        {
 +            gmx_warning("On node %d failed to free GPU #%d: %s",
 +                        cr->nodeid, get_current_gpu_device_id(), gpu_err_str);
 +        }
 +    }
 +
 +    if (opt2bSet("-membed",nfile,fnm))
 +    {
 +        sfree(membed);
 +    }
 +
 +#ifdef GMX_THREAD_MPI
 +    if (PAR(cr) && SIMMASTER(cr))
 +#endif
 +    {
 +        gmx_hardware_info_free(hwinfo);
 +    }
 +
 +    /* Does what it says */  
 +    print_date_and_time(fplog,cr->nodeid,"Finished mdrun",&runtime);
 +
 +    /* Close logfile already here if we were appending to it */
 +    if (MASTER(cr) && (Flags & MD_APPENDFILES))
 +    {
 +        gmx_log_close(fplog);
 +    } 
 +
 +    rc=(int)gmx_get_stop_condition();
 +
 +#ifdef GMX_THREAD_MPI
 +    /* we need to join all threads. The sub-threads join when they
 +       exit this function, but the master thread needs to be told to 
 +       wait for that. */
 +    if (PAR(cr) && MASTER(cr))
 +    {
 +        tMPI_Finalize();
 +    }
 +#endif
 +
 +    return rc;
 +}
index 3a27007d7000beddfd32c16f5bd4af968f1be9a8,0000000000000000000000000000000000000000..ab8c9a5749daedcb7f8d403a0ebdb6d6d54238a0
mode 100644,000000..100644
--- /dev/null
@@@ -1,306 -1,0 +1,306 @@@
-         "Analysing hydrogen-bonding network for automated assigment of histidine\n"
 +/*
 + * 
 + *                This source code is part of
 + * 
 + *                 G   R   O   M   A   C   S
 + * 
 + *          GROningen MAchine for Chemical Simulations
 + * 
 + *                        VERSION 3.2.0
 + * Written by David van der Spoel, Erik Lindahl, Berk Hess, and others.
 + * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 + * Copyright (c) 2001-2004, The GROMACS development team,
 + * check out http://www.gromacs.org for more information.
 +
 + * This program is free software; you can redistribute it and/or
 + * modify it under the terms of the GNU General Public License
 + * as published by the Free Software Foundation; either version 2
 + * of the License, or (at your option) any later version.
 + * 
 + * If you want to redistribute modifications, please consider that
 + * scientific software is very special. Version control is crucial -
 + * bugs must be traceable. We will be happy to consider code for
 + * inclusion in the official distribution, but derived work must not
 + * be called official GROMACS. Details are found in the README & COPYING
 + * files - if they are missing, get the official version at www.gromacs.org.
 + * 
 + * To help us fund GROMACS development, we humbly ask that you cite
 + * the papers on the package - you can find them in the top README file.
 + * 
 + * For more info, check our website at http://www.gromacs.org
 + * 
 + * And Hey:
 + * Gallium Rubidium Oxygen Manganese Argon Carbon Silicon
 + */
 +/* This file is completely threadsafe - keep it that way! */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <stdio.h>
 +#include <string.h>
 +#include "typedefs.h"
 +#include "pdbio.h"
 +#include "smalloc.h"
 +#include "vec.h"
 +#include "physics.h"
 +#include "toputil.h"
 +#include "pdb2top.h"
 +#include "string2.h"
 +#include "macros.h"
 +
 +static int in_strings(char *key,int nstr,const char **str)
 +{
 +  int j;
 +  
 +  for(j=0; (j<nstr); j++)
 +    if (strcmp(str[j],key) == 0)
 +      return j;
 +      
 +  return -1;
 +}
 +
 +static gmx_bool hbond(rvec x[],int i,int j,real distance)
 +{
 +  real tol = distance*distance;
 +  rvec   tmp;
 +  
 +  rvec_sub(x[i],x[j],tmp);
 +  
 +  return (iprod(tmp,tmp) < tol);
 +}
 +
 +static void chk_allhb(t_atoms *pdba,rvec x[],t_blocka *hb,
 +                    gmx_bool donor[],gmx_bool accept[],real dist)
 +{
 +  int i,j,k,ii,natom;
 +  
 +  natom=pdba->nr;
 +  snew(hb->index,natom+1);
 +  snew(hb->a,6*natom);
 +  hb->nr  = natom;
 +  hb->nra = 6*natom;
 +  
 +  k = ii = 0;
 +  hb->index[ii++] = 0;
 +  for(i=0; (i<natom); i++) {
 +    if (donor[i]) {
 +      for(j=i+1; (j<natom); j++) 
 +      if ((accept[j]) && (hbond(x,i,j,dist))) 
 +        hb->a[k++] = j;
 +    }
 +    else if (accept[i]) {
 +      for(j=i+1; (j<natom); j++) 
 +      if ((donor[j]) && (hbond(x,i,j,dist))) 
 +        hb->a[k++] = j;
 +    }
 +    hb->index[ii++] = k;
 +  }
 +  hb->nra = k;
 +}
 +
 +static void pr_hbonds(FILE *fp,t_blocka *hb,t_atoms *pdba)
 +{
 +  int i,j,k,j0,j1;
 +  
 +  fprintf(fp,"Dumping all hydrogen bonds!\n");
 +  for(i=0; (i<hb->nr); i++) {
 +    j0=hb->index[i];
 +    j1=hb->index[i+1];
 +    for(j=j0; (j<j1); j++) {
 +      k=hb->a[j];
 +      fprintf(fp,"%5s%4d%5s - %5s%4d%5s\n",
 +            *pdba->resinfo[pdba->atom[i].resind].name,
 +            pdba->resinfo[pdba->atom[i].resind].nr,*pdba->atomname[i],
 +            *pdba->resinfo[pdba->atom[k].resind].name,
 +            pdba->resinfo[pdba->atom[k].resind].nr,*pdba->atomname[k]);
 +    }
 +  }
 +}
 +
 +static gmx_bool chk_hbonds(int i,t_atoms *pdba, rvec x[],
 +                     gmx_bool ad[],gmx_bool hbond[],rvec xh,
 +                     real angle,real dist)
 +{
 +  gmx_bool bHB;
 +  int  j,aj,ri,natom;
 +  real d2,dist2,a;
 +  rvec nh,oh;
 +  
 +  natom=pdba->nr;
 +  bHB = FALSE;
 +  ri = pdba->atom[i].resind;
 +  dist2=sqr(dist);
 +  for(j=0; (j<natom); j++) {
 +    /* Check whether the other atom is a donor/acceptor and not i */
 +    if ((ad[j]) && (j != i)) {
 +      /* Check whether the other atom is on the same ring as well */
 +      if ((pdba->atom[j].resind != ri) ||
 +        ((strcmp(*pdba->atomname[j],"ND1") != 0) &&
 +         (strcmp(*pdba->atomname[j],"NE2") != 0))) {
 +      aj = j;
 +      d2  = distance2(x[i],x[j]);
 +      rvec_sub(x[i],xh,nh);
 +      rvec_sub(x[aj],xh,oh);
 +      a  = RAD2DEG * acos(cos_angle(nh,oh));
 +      if ((d2 < dist2) && (a > angle)) {
 +        if (debug)
 +          fprintf(debug,
 +                  "HBOND between %s%d-%s and %s%d-%s is %g nm, %g deg\n",
 +                  *pdba->resinfo[pdba->atom[i].resind].name, 
 +                  pdba->resinfo[pdba->atom[i].resind].nr,*pdba->atomname[i],
 +                  *pdba->resinfo[pdba->atom[aj].resind].name,
 +                  pdba->resinfo[pdba->atom[aj].resind].nr,*pdba->atomname[aj],
 +                  sqrt(d2),a);
 +        hbond[i] = TRUE;
 +        bHB      = TRUE;
 +      }
 +      }
 +    }
 +  }
 +  return bHB;
 +}
 +
 +static void calc_ringh(rvec xattach,rvec xb,rvec xc,rvec xh)
 +{
 +  rvec tab,tac;
 +  real n;
 + 
 +  /* Add a proton on a ring to atom attach at distance 0.1 nm */ 
 +  rvec_sub(xattach,xb,tab);
 +  rvec_sub(xattach,xc,tac);
 +  rvec_add(tab,tac,xh);
 +  n=0.1/norm(xh);
 +  svmul(n,xh,xh);
 +  rvec_inc(xh,xattach);
 +}
 +
 +void set_histp(t_atoms *pdba,rvec *x,real angle,real dist){
 +  static const char *prot_acc[] = {
 +    "O", "OD1", "OD2", "OE1", "OE2", "OG", "OG1", "OH", "OW"
 +  };
 +#define NPA asize(prot_acc)
 +  static const char *prot_don[] = {
 +    "N", "NH1", "NH2", "NE", "ND1", "ND2", "NE2", "NZ", "OG", "OG1", "OH", "NE1", "OW"
 +  };
 +#define NPD asize(prot_don)
 +  
 +  gmx_bool *donor,*acceptor;
 +  gmx_bool *hbond,bHaveH=FALSE;
 +  gmx_bool bHDd,bHEd;
 +  rvec xh1,xh2;
 +  int  natom;
 +  int  i,j,nd,na,aj,hisind,his0,type=-1;
 +  int  nd1,ne2,cg,cd2,ce1;
 +  t_blocka *hb;
 +  real d;
 +  char *atomnm;
 +  
 +  natom=pdba->nr;
 +
 +  i = 0;
 +  while (i < natom &&
 +       gmx_strcasecmp(*pdba->resinfo[pdba->atom[i].resind].name,"HIS") != 0)
 +  {
 +    i++;
 +  }
 +  if (natom == i)
 +  {
 +    return;
 +  }
 +
 +  /* A histidine residue exists that requires automated assignment, so
 +   * doing the analysis of donors and acceptors is worthwhile. */
 +  fprintf(stderr,
++        "Analysing hydrogen-bonding network for automated assignment of histidine\n"
 +        " protonation.");
 +
 +  snew(donor,natom);
 +  snew(acceptor,natom);
 +  snew(hbond,natom);
 +  snew(hb,1);
 +  
 +  nd=na=0;
 +  for(j=0; (j<natom); j++) {
 +    if (in_strings(*pdba->atomname[j],NPA,prot_acc) != -1) {
 +      acceptor[j] = TRUE;
 +      na++;
 +    }
 +    if (in_strings(*pdba->atomname[j],NPD,prot_don) != -1) {
 +      donor[j] = TRUE;
 +      nd++;
 +    }
 +  }
 +  fprintf(stderr," %d donors and %d acceptors were found.\n",nd,na);
 +  chk_allhb(pdba,x,hb,donor,acceptor,dist);
 +  if (debug)
 +    pr_hbonds(debug,hb,pdba);
 +  fprintf(stderr,"There are %d hydrogen bonds\n",hb->nra);
 +  
 +  /* Now do the HIS stuff */
 +  hisind=-1;
 +  while (i < natom)
 +  {
 +    if (gmx_strcasecmp(*pdba->resinfo[pdba->atom[i].resind].name,"HIS") != 0) 
 +    {
 +      i++;
 +    }
 +    else
 +    {
 +      if (pdba->atom[i].resind != hisind) {
 +      hisind=pdba->atom[i].resind;
 +      
 +      /* Find the  atoms in the ring */
 +      nd1=ne2=cg=cd2=ce1=-1;
 +      while (i<natom && pdba->atom[i].resind==hisind) {
 +        atomnm = *pdba->atomname[i];
 +        if (strcmp(atomnm,"CD2") == 0)
 +          cd2 = i;
 +        else if (strcmp(atomnm,"CG") == 0)
 +          cg  = i;
 +        else if (strcmp(atomnm,"CE1") == 0)
 +          ce1 = i;
 +        else if (strcmp(atomnm,"ND1") == 0)
 +          nd1 = i;
 +        else if (strcmp(atomnm,"NE2") == 0)
 +          ne2 = i;
 +
 +        i++;
 +      }
 +      
 +      if (!((cg == -1 ) || (cd2 == -1) || (ce1 == -1) ||
 +            (nd1 == -1) || (ne2 == -1))) {
 +        calc_ringh(x[nd1],x[cg],x[ce1],xh1);
 +        calc_ringh(x[ne2],x[ce1],x[cd2],xh2);
 +        
 +        bHDd = chk_hbonds(nd1,pdba,x,acceptor,hbond,xh1,angle,dist);
 +        chk_hbonds(nd1,pdba,x,donor,hbond,xh1,angle,dist);
 +        bHEd = chk_hbonds(ne2,pdba,x,acceptor,hbond,xh2,angle,dist);
 +        chk_hbonds(ne2,pdba,x,donor,hbond,xh2,angle,dist);
 +        
 +        if (bHDd) {
 +          if (bHEd)
 +            type = ehisH;
 +          else 
 +            type = ehisA;
 +        }
 +        else 
 +          type = ehisB;
 +        fprintf(stderr,"Will use %s for residue %d\n",
 +                hh[type],pdba->resinfo[hisind].nr);
 +      }
 +      else 
 +        gmx_fatal(FARGS,"Incomplete ring in HIS%d",
 +                  pdba->resinfo[hisind].nr);
 +      
 +      snew(pdba->resinfo[hisind].rtp,1);
 +      *pdba->resinfo[hisind].rtp = strdup(hh[type]);
 +      }
 +    }
 +  }
 +  done_blocka(hb);
 +  sfree(hb);
 +  sfree(donor);
 +  sfree(acceptor);
 +  sfree(hbond);
 +}
index 9bdf3666a1de51bfb05665b88875e101e7fe3265,0000000000000000000000000000000000000000..18dde119a6eac6b122882330a3124fd9739c881f
mode 100644,000000..100644
--- /dev/null
@@@ -1,2030 -1,0 +1,2031 @@@
-             gmx_fatal(FARGS,"In the chosen force field there is no residue type for '%s'%s",name,bStart ? " as a starting terminus" : (bEnd ? " as an ending terminus" : ""));
 +/* -*- mode: c; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4; c-file-style: "stroustrup"; -*-
 + *
 + *
 + *                This source code is part of
 + * 
 + *                 G   R   O   M   A   C   S
 + * 
 + *          GROningen MAchine for Chemical Simulations
 + * 
 + *                        VERSION 3.2.0
 + * Written by David van der Spoel, Erik Lindahl, Berk Hess, and others.
 + * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 + * Copyright (c) 2001-2004, The GROMACS development team,
 + * check out http://www.gromacs.org for more information.
 +
 + * This program is free software; you can redistribute it and/or
 + * modify it under the terms of the GNU General Public License
 + * as published by the Free Software Foundation; either version 2
 + * of the License, or (at your option) any later version.
 + * 
 + * If you want to redistribute modifications, please consider that
 + * scientific software is very special. Version control is crucial -
 + * bugs must be traceable. We will be happy to consider code for
 + * inclusion in the official distribution, but derived work must not
 + * be called official GROMACS. Details are found in the README & COPYING
 + * files - if they are missing, get the official version at www.gromacs.org.
 + * 
 + * To help us fund GROMACS development, we humbly ask that you cite
 + * the papers on the package - you can find them in the top README file.
 + * 
 + * For more info, check our website at http://www.gromacs.org
 + * 
 + * And Hey:
 + * Gallium Rubidium Oxygen Manganese Argon Carbon Silicon
 + */
 +#ifdef HAVE_CONFIG_H
 +#include <config.h>
 +#endif
 +
 +#include <time.h>
 +#include <ctype.h>
 +#include "sysstuff.h"
 +#include "typedefs.h"
 +#include "gmxfio.h"
 +#include "smalloc.h"
 +#include "copyrite.h"
 +#include "string2.h"
 +#include "confio.h"
 +#include "symtab.h"
 +#include "vec.h"
 +#include "statutil.h"
 +#include "futil.h"
 +#include "gmx_fatal.h"
 +#include "pdbio.h"
 +#include "toputil.h"
 +#include "h_db.h"
 +#include "physics.h"
 +#include "pgutil.h"
 +#include "calch.h"
 +#include "resall.h"
 +#include "pdb2top.h"
 +#include "ter_db.h"
 +#include "strdb.h"
 +#include "gbutil.h"
 +#include "genhydro.h"
 +#include "readinp.h"
 +#include "atomprop.h"
 +#include "xlate.h"
 +#include "specbond.h"
 +#include "index.h"
 +#include "hizzie.h"
 +#include "fflibutil.h"
 +#include "macros.h"
 +
 +
 +typedef struct {
 +  char gmx[6];
 +  char main[6];
 +  char nter[6];
 +  char cter[6];
 +  char bter[6];
 +} rtprename_t;
 +
 +
 +static const char *res2bb_notermini(const char *name,
 +                                  int nrr,const rtprename_t *rr)
 +{
 +  /* NOTE: This function returns the main building block name,
 +   *       it does not take terminal renaming into account.
 +   */
 +  int i;
 +
 +  i = 0;
 +  while (i < nrr && gmx_strcasecmp(name,rr[i].gmx) != 0) {
 +    i++;
 +  }
 +
 +  return (i < nrr ? rr[i].main : name);
 +}
 +
 +static const char *select_res(int nr,int resnr,
 +                            const char *name[],const char *expl[],
 +                            const char *title,
 +                            int nrr,const rtprename_t *rr)
 +{
 +  int sel=0;
 +
 +  printf("Which %s type do you want for residue %d\n",title,resnr+1);
 +  for(sel=0; (sel < nr); sel++) {
 +    printf("%d. %s (%s)\n",
 +         sel,expl[sel],res2bb_notermini(name[sel],nrr,rr));
 +  }
 +  printf("\nType a number:"); fflush(stdout);
 +
 +  if (scanf("%d",&sel) != 1)
 +    gmx_fatal(FARGS,"Answer me for res %s %d!",title,resnr+1);
 +  
 +  return name[sel];
 +}
 +
 +static const char *get_asptp(int resnr,int nrr,const rtprename_t *rr)
 +{
 +  enum { easp, easpH, easpNR };
 +  const char *lh[easpNR] = { "ASP", "ASPH" };
 +  const char *expl[easpNR] = {
 +    "Not protonated (charge -1)",
 +    "Protonated (charge 0)"
 +  };
 +
 +  return select_res(easpNR,resnr,lh,expl,"ASPARTIC ACID",nrr,rr);
 +}
 +
 +static const char *get_glutp(int resnr,int nrr,const rtprename_t *rr)
 +{
 +  enum { eglu, egluH, egluNR };
 +  const char *lh[egluNR] = { "GLU", "GLUH" };
 +  const char *expl[egluNR] = {
 +    "Not protonated (charge -1)",
 +    "Protonated (charge 0)"
 +  };
 +
 +  return select_res(egluNR,resnr,lh,expl,"GLUTAMIC ACID",nrr,rr);
 +}
 +
 +static const char *get_glntp(int resnr,int nrr,const rtprename_t *rr)
 +{
 +  enum { egln, eglnH, eglnNR };
 +  const char *lh[eglnNR] = { "GLN", "QLN" };
 +  const char *expl[eglnNR] = {
 +    "Not protonated (charge 0)",
 +    "Protonated (charge +1)"
 +  };
 +
 +  return select_res(eglnNR,resnr,lh,expl,"GLUTAMINE",nrr,rr);
 +}
 +
 +static const char *get_lystp(int resnr,int nrr,const rtprename_t *rr)
 +{
 +  enum { elys, elysH, elysNR };
 +  const  char *lh[elysNR] = { "LYSN", "LYS" };
 +  const char *expl[elysNR] = {
 +    "Not protonated (charge 0)",
 +    "Protonated (charge +1)"
 +  };
 +
 +  return select_res(elysNR,resnr,lh,expl,"LYSINE",nrr,rr);
 +}
 +
 +static const char *get_argtp(int resnr,int nrr,const rtprename_t *rr)
 +{
 +  enum { earg, eargH, eargNR };
 +  const  char *lh[eargNR] = { "ARGN", "ARG" };
 +  const char *expl[eargNR] = {
 +    "Not protonated (charge 0)",
 +    "Protonated (charge +1)"
 +  };
 +
 +  return select_res(eargNR,resnr,lh,expl,"ARGININE",nrr,rr);
 +}
 +
 +static const char *get_histp(int resnr,int nrr,const rtprename_t *rr)
 +{
 +  const char *expl[ehisNR] = {
 +    "H on ND1 only",
 +    "H on NE2 only",
 +    "H on ND1 and NE2",
 +    "Coupled to Heme"
 +  };
 +  
 +  return select_res(ehisNR,resnr,hh,expl,"HISTIDINE",nrr,rr);
 +}
 +
 +static void read_rtprename(const char *fname,FILE *fp,
 +                         int *nrtprename,rtprename_t **rtprename)
 +{
 +  char line[STRLEN],buf[STRLEN];
 +  int  n;
 +  rtprename_t *rr;
 +  int  ncol,nc;
 +
 +  n  = *nrtprename;
 +  rr = *rtprename;
 +
 +  ncol = 0;
 +  while(get_a_line(fp,line,STRLEN)) {
 +    srenew(rr,n+1);
 +    nc = sscanf(line,"%s %s %s %s %s %s",
 +              rr[n].gmx,rr[n].main,rr[n].nter,rr[n].cter,rr[n].bter,buf);
 +    if (ncol == 0) {
 +      if (nc != 2 && nc != 5) {
 +      gmx_fatal(FARGS,"Residue renaming database '%s' has %d columns instead of %d, %d or %d",fname,ncol,2,5);
 +      }
 +      ncol = nc;
 +    } else if (nc != ncol) {
 +      gmx_fatal(FARGS,"A line in residue renaming database '%s' has %d columns, while previous lines have %d columns",fname,nc,ncol);
 +    }
 +    
 +    if (nc == 2) {
 +      /* This file does not have special termini names, copy them from main */
 +      strcpy(rr[n].nter,rr[n].main);
 +      strcpy(rr[n].cter,rr[n].main);
 +      strcpy(rr[n].bter,rr[n].main);
 +    }
 +
 +    n++;
 +  }
 +
 +  *nrtprename = n;
 +  *rtprename  = rr;
 +}
 +
 +static char *search_resrename(int nrr,rtprename_t *rr,
 +                              const char *name,
 +                              gmx_bool bStart,gmx_bool bEnd,
 +                              gmx_bool bCompareFFRTPname)
 +{
 +    char *nn;
 +    int i;
 +
 +    nn = NULL;
 +
 +    i = 0;
 +    while (i<nrr && ((!bCompareFFRTPname && strcmp(name,rr[i].gmx)  != 0) ||
 +                     ( bCompareFFRTPname && strcmp(name,rr[i].main) != 0)))
 +    {
 +        i++;
 +    }
 +
 +    /* If found in the database, rename this residue's rtp building block,
 +     * otherwise keep the old name.
 +     */
 +    if (i < nrr)
 +    {
 +        if (bStart && bEnd)
 +        {
 +            nn = rr[i].bter;
 +        }
 +        else if (bStart)
 +        {
 +            nn = rr[i].nter;
 +        }
 +        else if (bEnd)
 +        {
 +            nn = rr[i].cter;
 +        }
 +        else
 +        {
 +            nn = rr[i].main;
 +        }
++        
 +        if (nn[0] == '-')
 +        {
++            gmx_fatal(FARGS,"In the chosen force field there is no residue type for '%s'%s",name,bStart ? ( bEnd ? " as a standalone (starting & ending) residue" : " as a starting terminus") : (bEnd ? " as an ending terminus" : ""));
 +        }
 +    }
 +
 +    return nn;
 +}
 +      
 +
 +static void rename_resrtp(t_atoms *pdba,int nterpairs,int *r_start,int *r_end,
 +                          int nrr,rtprename_t *rr,t_symtab *symtab,
 +                          gmx_bool bVerbose)
 +{
 +    int  r,i,j;
 +    gmx_bool bStart,bEnd;
 +    char *nn;
 +    gmx_bool bFFRTPTERRNM;
 +
 +    bFFRTPTERRNM = (getenv("GMX_NO_FFRTP_TER_RENAME") == NULL);
 +
 +    for(r=0; r<pdba->nres; r++)
 +    {
 +        bStart = FALSE;
 +        bEnd   = FALSE;
 +        for(j=0; j<nterpairs; j++)
 +        {
 +            if (r == r_start[j])
 +            {
 +                bStart = TRUE;
 +            }
 +        }
 +        for(j=0; j<nterpairs; j++)
 +        {
 +            if (r == r_end[j])
 +            {
 +                bEnd = TRUE;
 +            }
 +        }
 +
 +        nn = search_resrename(nrr,rr,*pdba->resinfo[r].rtp,bStart,bEnd,FALSE);
 +
 +        if (bFFRTPTERRNM && nn == NULL && (bStart || bEnd))
 +        {
 +            /* This is a terminal residue, but the residue name,
 +             * currently stored in .rtp, is not a standard residue name,
 +             * but probably a force field specific rtp name.
 +             * Check if we need to rename it because it is terminal.
 +             */
 +            nn = search_resrename(nrr,rr,
 +                                  *pdba->resinfo[r].rtp,bStart,bEnd,TRUE);
 +        }
 +
 +        if (nn != NULL && strcmp(*pdba->resinfo[r].rtp,nn) != 0)
 +        {
 +            if (bVerbose)
 +            {
 +                printf("Changing rtp entry of residue %d %s to '%s'\n",
 +                       pdba->resinfo[r].nr,*pdba->resinfo[r].name,nn);
 +            }
 +            pdba->resinfo[r].rtp = put_symtab(symtab,nn);
 +        }
 +    }
 +}
 +
 +static void pdbres_to_gmxrtp(t_atoms *pdba)
 +{
 +    int i;
 +  
 +    for(i=0; (i<pdba->nres); i++)
 +    {
 +        if (pdba->resinfo[i].rtp == NULL)
 +        {
 +            pdba->resinfo[i].rtp = pdba->resinfo[i].name;
 +        }
 +    }
 +}
 +
 +static void rename_pdbres(t_atoms *pdba,const char *oldnm,const char *newnm,
 +                          gmx_bool bFullCompare,t_symtab *symtab)
 +{
 +    char *resnm;
 +    int i;
 +  
 +    for(i=0; (i<pdba->nres); i++)
 +    {
 +        resnm = *pdba->resinfo[i].name;
 +        if ((bFullCompare && (gmx_strcasecmp(resnm,oldnm) == 0)) ||
 +            (!bFullCompare && strstr(resnm,oldnm) != NULL))
 +        {
 +            /* Rename the residue name (not the rtp name) */
 +            pdba->resinfo[i].name = put_symtab(symtab,newnm);
 +        }
 +    }
 +}
 +
 +static void rename_bb(t_atoms *pdba,const char *oldnm,const char *newnm,
 +                      gmx_bool bFullCompare,t_symtab *symtab)
 +{
 +    char *bbnm;
 +    int i;
 +  
 +    for(i=0; (i<pdba->nres); i++)
 +    {
 +        /* We have not set the rtp name yes, use the residue name */
 +        bbnm = *pdba->resinfo[i].name;
 +        if ((bFullCompare && (gmx_strcasecmp(bbnm,oldnm) == 0)) ||
 +            (!bFullCompare && strstr(bbnm,oldnm) != NULL))
 +        {
 +            /* Change the rtp builing block name */
 +            pdba->resinfo[i].rtp = put_symtab(symtab,newnm);
 +        }
 +    }
 +}
 +
 +static void rename_bbint(t_atoms *pdba,const char *oldnm,
 +                         const char *gettp(int,int,const rtprename_t *),
 +                         gmx_bool bFullCompare,
 +                         t_symtab *symtab,
 +                         int nrr,const rtprename_t *rr)
 +{
 +    int  i;
 +    const char *ptr;
 +    char *bbnm;
 +  
 +    for(i=0; i<pdba->nres; i++)
 +    {
 +        /* We have not set the rtp name yes, use the residue name */
 +        bbnm = *pdba->resinfo[i].name;
 +        if ((bFullCompare && (strcmp(bbnm,oldnm) == 0)) ||
 +            (!bFullCompare && strstr(bbnm,oldnm) != NULL))
 +        {
 +            ptr = gettp(i,nrr,rr);
 +            pdba->resinfo[i].rtp = put_symtab(symtab,ptr);
 +        }
 +    }
 +}
 +
 +static void check_occupancy(t_atoms *atoms,const char *filename,gmx_bool bVerbose)
 +{
 +  int i,ftp;
 +  int nzero=0;
 +  int nnotone=0;
 +  
 +  ftp = fn2ftp(filename);
 +  if (!atoms->pdbinfo || ((ftp != efPDB) && (ftp != efBRK) && (ftp != efENT)))
 +    fprintf(stderr,"No occupancies in %s\n",filename);
 +  else {
 +    for(i=0; (i<atoms->nr); i++) {
 +      if (atoms->pdbinfo[i].occup != 1) {
 +      if (bVerbose)
 +        fprintf(stderr,"Occupancy for atom %s%d-%s is %f rather than 1\n",
 +                *atoms->resinfo[atoms->atom[i].resind].name,
 +                atoms->resinfo[ atoms->atom[i].resind].nr,
 +                *atoms->atomname[i],
 +                atoms->pdbinfo[i].occup);
 +      if (atoms->pdbinfo[i].occup == 0) 
 +        nzero++;
 +      else 
 +        nnotone++;
 +      }
 +    }
 +    if (nzero == atoms->nr) {
 +      fprintf(stderr,"All occupancy fields zero. This is probably not an X-Ray structure\n");
 +    } else if ((nzero > 0) || (nnotone > 0)) {
 +      fprintf(stderr,
 +            "\n"
 +            "WARNING: there were %d atoms with zero occupancy and %d atoms with\n"
 +            "         occupancy unequal to one (out of %d atoms). Check your pdb file.\n"
 +            "\n",
 +            nzero,nnotone,atoms->nr);
 +    } else {
 +      fprintf(stderr,"All occupancies are one\n");
 +    }
 +  }
 +}
 +
 +void write_posres(char *fn,t_atoms *pdba,real fc)
 +{
 +  FILE *fp;
 +  int  i;
 +  
 +  fp=gmx_fio_fopen(fn,"w");
 +  fprintf(fp,
 +        "; In this topology include file, you will find position restraint\n"
 +        "; entries for all the heavy atoms in your original pdb file.\n"
 +        "; This means that all the protons which were added by pdb2gmx are\n"
 +        "; not restrained.\n"
 +        "\n"
 +        "[ position_restraints ]\n"
 +        "; %4s%6s%8s%8s%8s\n","atom","type","fx","fy","fz"
 +        );
 +  for(i=0; (i<pdba->nr); i++) {
 +    if (!is_hydrogen(*pdba->atomname[i]) && !is_dummymass(*pdba->atomname[i]))
 +      fprintf(fp,"%6d%6d  %g  %g  %g\n",i+1,1,fc,fc,fc);
 +  }
 +  gmx_fio_fclose(fp);
 +}
 +
 +static int read_pdball(const char *inf, const char *outf,char *title,
 +                     t_atoms *atoms, rvec **x,
 +                     int *ePBC,matrix box, gmx_bool bRemoveH,
 +                     t_symtab *symtab,gmx_residuetype_t rt,const char *watres,
 +                     gmx_atomprop_t aps,gmx_bool bVerbose)
 +/* Read a pdb file. (containing proteins) */
 +{
 +  int  natom,new_natom,i;
 +  
 +  /* READ IT */
 +  printf("Reading %s...\n",inf);
 +  get_stx_coordnum(inf,&natom);
 +  init_t_atoms(atoms,natom,TRUE);
 +  snew(*x,natom);
 +  read_stx_conf(inf,title,atoms,*x,NULL,ePBC,box);
 +  if (fn2ftp(inf) == efPDB)
 +    get_pdb_atomnumber(atoms,aps);
 +  if (bRemoveH) {
 +    new_natom=0;
 +    for(i=0; i<atoms->nr; i++)
 +      if (!is_hydrogen(*atoms->atomname[i])) {
 +      atoms->atom[new_natom]=atoms->atom[i];
 +      atoms->atomname[new_natom]=atoms->atomname[i];
 +      atoms->pdbinfo[new_natom]=atoms->pdbinfo[i];
 +      copy_rvec((*x)[i],(*x)[new_natom]);
 +      new_natom++;
 +      }
 +    atoms->nr=new_natom;
 +    natom=new_natom;
 +  }
 +    
 +  printf("Read");
 +  if (title && title[0])
 +    printf(" '%s',",title);
 +  printf(" %d atoms\n",natom);
 +  
 +  /* Rename residues */
 +  rename_pdbres(atoms,"HOH",watres,FALSE,symtab);
 +  rename_pdbres(atoms,"SOL",watres,FALSE,symtab);
 +  rename_pdbres(atoms,"WAT",watres,FALSE,symtab);
 +
 +  rename_atoms("xlateat.dat",NULL,
 +             atoms,symtab,NULL,TRUE,rt,TRUE,bVerbose);
 +  
 +  if (natom == 0)
 +    return 0;
 +
 +  if (outf)
 +    write_sto_conf(outf,title,atoms,*x,NULL,*ePBC,box);
 + 
 +  return natom;
 +}
 +
 +void process_chain(t_atoms *pdba, rvec *x, 
 +                 gmx_bool bTrpU,gmx_bool bPheU,gmx_bool bTyrU,
 +                 gmx_bool bLysMan,gmx_bool bAspMan,gmx_bool bGluMan,
 +                 gmx_bool bHisMan,gmx_bool bArgMan,gmx_bool bGlnMan,
 +                 real angle,real distance,t_symtab *symtab,
 +                 int nrr,const rtprename_t *rr)
 +{
 +  /* Rename aromatics, lys, asp and histidine */
 +  if (bTyrU) rename_bb(pdba,"TYR","TYRU",FALSE,symtab);
 +  if (bTrpU) rename_bb(pdba,"TRP","TRPU",FALSE,symtab);
 +  if (bPheU) rename_bb(pdba,"PHE","PHEU",FALSE,symtab);
 +  if (bLysMan) 
 +    rename_bbint(pdba,"LYS",get_lystp,FALSE,symtab,nrr,rr);
 +  if (bArgMan) 
 +    rename_bbint(pdba,"ARG",get_argtp,FALSE,symtab,nrr,rr);
 +  if (bGlnMan) 
 +    rename_bbint(pdba,"GLN",get_glntp,FALSE,symtab,nrr,rr);
 +  if (bAspMan) 
 +    rename_bbint(pdba,"ASP",get_asptp,FALSE,symtab,nrr,rr);
 +  else
 +    rename_bb(pdba,"ASPH","ASP",FALSE,symtab);
 +  if (bGluMan) 
 +    rename_bbint(pdba,"GLU",get_glutp,FALSE,symtab,nrr,rr);
 +  else
 +    rename_bb(pdba,"GLUH","GLU",FALSE,symtab);
 +
 +  if (!bHisMan)
 +    set_histp(pdba,x,angle,distance);
 +  else
 +    rename_bbint(pdba,"HIS",get_histp,TRUE,symtab,nrr,rr);
 +
 +  /* Initialize the rtp builing block names with the residue names
 +   * for the residues that have not been processed above.
 +   */
 +  pdbres_to_gmxrtp(pdba);
 +
 +  /* Now we have all rtp names set.
 +   * The rtp names will conform to Gromacs naming,
 +   * unless the input pdb file contained one or more force field specific
 +   * rtp names as residue names.
 +   */
 +}
 +
 +/* struct for sorting the atoms from the pdb file */
 +typedef struct {
 +  int  resnr;  /* residue number               */
 +  int  j;      /* database order index         */
 +  int  index;  /* original atom number         */
 +  char anm1;   /* second letter of atom name   */
 +  char altloc; /* alternate location indicator */
 +} t_pdbindex;
 +  
 +int pdbicomp(const void *a,const void *b)
 +{
 +  t_pdbindex *pa,*pb;
 +  int d;
 +
 +  pa=(t_pdbindex *)a;
 +  pb=(t_pdbindex *)b;
 +
 +  d = (pa->resnr - pb->resnr);
 +  if (d==0) {
 +    d = (pa->j - pb->j);
 +    if (d==0) {
 +      d = (pa->anm1 - pb->anm1);
 +      if (d==0)
 +      d = (pa->altloc - pb->altloc);
 +    }
 +  }
 +
 +  return d;
 +}
 +
 +static void sort_pdbatoms(int nrtp,t_restp restp[],t_hackblock hb[],
 +                        int natoms,t_atoms **pdbaptr,rvec **x,
 +                        t_blocka *block,char ***gnames)
 +{
 +  t_atoms *pdba,*pdbnew;
 +  rvec **xnew;
 +  int     i,j;
 +  t_restp *rptr;
 +  t_hackblock *hbr;
 +  t_pdbindex *pdbi;
 +  atom_id *a;
 +  char *atomnm;
 +  
 +  pdba=*pdbaptr;
 +  natoms=pdba->nr;
 +  pdbnew=NULL;
 +  snew(xnew,1);
 +  snew(pdbi, natoms);
 +  
 +  for(i=0; i<natoms; i++)
 +  {
 +      atomnm = *pdba->atomname[i];
 +      rptr = &restp[pdba->atom[i].resind];
 +      for(j=0; (j<rptr->natom); j++) 
 +      {
 +          if (gmx_strcasecmp(atomnm,*(rptr->atomname[j])) == 0) 
 +          {
 +              break;
 +          }
 +      }
 +      if (j==rptr->natom) 
 +      {
 +          char buf[STRLEN];
 +          
 +          sprintf(buf,
 +                  "Atom %s in residue %s %d was not found in rtp entry %s with %d atoms\n"
 +                  "while sorting atoms.\n%s",atomnm,
 +                  *pdba->resinfo[pdba->atom[i].resind].name,
 +                  pdba->resinfo[pdba->atom[i].resind].nr,
 +                  rptr->resname,
 +                  rptr->natom,
 +                  is_hydrogen(atomnm) ? 
 +                  "\nFor a hydrogen, this can be a different protonation state, or it\n"
 +                  "might have had a different number in the PDB file and was rebuilt\n"
 +                  "(it might for instance have been H3, and we only expected H1 & H2).\n"
 +                  "Note that hydrogens might have been added to the entry for the N-terminus.\n"
 +                  "Remove this hydrogen or choose a different protonation state to solve it.\n"
 +                  "Option -ignh will ignore all hydrogens in the input." : ".");
 +          gmx_fatal(FARGS,buf);
 +      }
 +      /* make shadow array to be sorted into indexgroup */
 +      pdbi[i].resnr  = pdba->atom[i].resind;
 +      pdbi[i].j      = j;
 +      pdbi[i].index  = i;
 +      pdbi[i].anm1   = atomnm[1];
 +      pdbi[i].altloc = pdba->pdbinfo[i].altloc;
 +  }
 +  qsort(pdbi,natoms,(size_t)sizeof(pdbi[0]),pdbicomp);
 +    
 +  /* pdba is sorted in pdbnew using the pdbi index */ 
 +  snew(a,natoms);
 +  snew(pdbnew,1);
 +  init_t_atoms(pdbnew,natoms,TRUE);
 +  snew(*xnew,natoms);
 +  pdbnew->nr=pdba->nr;
 +  pdbnew->nres=pdba->nres;
 +  sfree(pdbnew->resinfo);
 +  pdbnew->resinfo=pdba->resinfo;
 +  for (i=0; i<natoms; i++) {
 +    pdbnew->atom[i]     = pdba->atom[pdbi[i].index];
 +    pdbnew->atomname[i] = pdba->atomname[pdbi[i].index];
 +    pdbnew->pdbinfo[i]  = pdba->pdbinfo[pdbi[i].index];
 +    copy_rvec((*x)[pdbi[i].index],(*xnew)[i]);
 +     /* make indexgroup in block */
 +    a[i]=pdbi[i].index;
 +  }
 +  /* clean up */
 +  sfree(pdba->atomname);
 +  sfree(pdba->atom);
 +  sfree(pdba->pdbinfo);
 +  sfree(pdba);
 +  sfree(*x);
 +  /* copy the sorted pdbnew back to pdba */
 +  *pdbaptr=pdbnew;
 +  *x=*xnew;
 +  add_grp(block, gnames, natoms, a, "prot_sort");
 +  sfree(xnew);
 +  sfree(a);
 +  sfree(pdbi);
 +}
 +
 +static int remove_duplicate_atoms(t_atoms *pdba,rvec x[],gmx_bool bVerbose)
 +{
 +  int     i,j,oldnatoms,ndel;
 +  t_resinfo *ri;
 +  
 +  printf("Checking for duplicate atoms....\n");
 +  oldnatoms    = pdba->nr;
 +  ndel = 0;
 +  /* NOTE: pdba->nr is modified inside the loop */
 +  for(i=1; (i < pdba->nr); i++) {
 +    /* compare 'i' and 'i-1', throw away 'i' if they are identical 
 +       this is a 'while' because multiple alternate locations can be present */
 +    while ( (i < pdba->nr) &&
 +          (pdba->atom[i-1].resind == pdba->atom[i].resind) &&
 +          (strcmp(*pdba->atomname[i-1],*pdba->atomname[i])==0) ) {
 +      ndel++;
 +      if (bVerbose) {
 +      ri = &pdba->resinfo[pdba->atom[i].resind];
 +      printf("deleting duplicate atom %4s  %s%4d%c",
 +             *pdba->atomname[i],*ri->name,ri->nr,ri->ic);
 +      if (ri->chainid && (ri->chainid != ' '))
 +        printf(" ch %c", ri->chainid);
 +      if (pdba->pdbinfo) {
 +        if (pdba->pdbinfo[i].atomnr)
 +          printf("  pdb nr %4d",pdba->pdbinfo[i].atomnr);
 +        if (pdba->pdbinfo[i].altloc && (pdba->pdbinfo[i].altloc!=' '))
 +          printf("  altloc %c",pdba->pdbinfo[i].altloc);
 +      }
 +      printf("\n");
 +      }
 +      pdba->nr--;
 +      /* We can not free, since it might be in the symtab */
 +      /* sfree(pdba->atomname[i]); */
 +      for (j=i; j < pdba->nr; j++) {
 +      pdba->atom[j]     = pdba->atom[j+1];
 +      pdba->atomname[j] = pdba->atomname[j+1];
 +      pdba->pdbinfo[j]  = pdba->pdbinfo[j+1];
 +      copy_rvec(x[j+1],x[j]);
 +      }
 +      srenew(pdba->atom,     pdba->nr);
 +      /* srenew(pdba->atomname, pdba->nr); */
 +      srenew(pdba->pdbinfo,  pdba->nr);
 +    }
 +  }
 +  if (pdba->nr != oldnatoms)
 +    printf("Now there are %d atoms. Deleted %d duplicates.\n",pdba->nr,ndel);
 +  
 +  return pdba->nr;
 +}
 +
 +void find_nc_ter(t_atoms *pdba,int r0,int r1,int *r_start,int *r_end,gmx_residuetype_t rt)
 +{
 +    int i;
 +    const char *p_startrestype;
 +    const char *p_restype;
 +    int         nstartwarn,nendwarn;
 +    
 +    *r_start = -1;
 +    *r_end   = -1;
 +
 +    nstartwarn = 0;
 +    nendwarn   = 0;
 +    
 +    /* Find the starting terminus (typially N or 5') */
 +    for(i=r0;i<r1 && *r_start==-1;i++)
 +    {
 +        gmx_residuetype_get_type(rt,*pdba->resinfo[i].name,&p_startrestype);
 +        if( !gmx_strcasecmp(p_startrestype,"Protein") || !gmx_strcasecmp(p_startrestype,"DNA") || !gmx_strcasecmp(p_startrestype,"RNA") )
 +        {
 +            printf("Identified residue %s%d as a starting terminus.\n",*pdba->resinfo[i].name,pdba->resinfo[i].nr);
 +            *r_start=i;
 +        }
 +        else 
 +        {            
 +            if(nstartwarn < 5)
 +            {    
 +                printf("Warning: Starting residue %s%d in chain not identified as Protein/RNA/DNA.\n",*pdba->resinfo[i].name,pdba->resinfo[i].nr);
 +            }
 +            if(nstartwarn == 5)
 +            {
 +                printf("More than 5 unidentified residues at start of chain - disabling further warnings.\n");
 +            }
 +            nstartwarn++;
 +        }
 +    }
 +
 +    if(*r_start>=0)
 +    {
 +        /* Go through the rest of the residues, check that they are the same class, and identify the ending terminus. */
 +        for(i=*r_start;i<r1;i++)
 +        {
 +            gmx_residuetype_get_type(rt,*pdba->resinfo[i].name,&p_restype);
 +            if( !gmx_strcasecmp(p_restype,p_startrestype) && nendwarn==0)
 +            {
 +                *r_end=i;
 +            }
 +            else 
 +            {
 +                if(nendwarn < 5)
 +                {    
 +                    printf("Warning: Residue %s%d in chain has different type (%s) from starting residue %s%d (%s).\n",
 +                           *pdba->resinfo[i].name,pdba->resinfo[i].nr,p_restype,
 +                           *pdba->resinfo[*r_start].name,pdba->resinfo[*r_start].nr,p_startrestype);
 +                }
 +                if(nendwarn == 5)
 +                {
 +                    printf("More than 5 unidentified residues at end of chain - disabling further warnings.\n");
 +                }
 +                nendwarn++;                
 +            }
 +        }  
 +    }
 +    
 +    if(*r_end>=0)
 +    {
 +        printf("Identified residue %s%d as a ending terminus.\n",*pdba->resinfo[*r_end].name,pdba->resinfo[*r_end].nr);
 +    }
 +}
 +
 +
 +static void
 +modify_chain_numbers(t_atoms *       pdba,
 +                     const char *    chainsep)
 +{
 +    int   i;
 +    char  old_prev_chainid;
 +    char  old_this_chainid;
 +    int   old_prev_chainnum;
 +    int   old_this_chainnum;
 +    t_resinfo *ri;
 +    char  select[STRLEN];
 +    int   new_chainnum;
 +    int           this_atomnum;
 +    int           prev_atomnum;
 +    const char *  prev_atomname;
 +    const char *  this_atomname;
 +    const char *  prev_resname;
 +    const char *  this_resname;
 +    int           prev_resnum;
 +    int           this_resnum;
 +    char          prev_chainid;
 +    char          this_chainid;
 +    int           prev_chainnumber;
 +    int           this_chainnumber;
 +   
 +    enum 
 +    { 
 +        SPLIT_ID_OR_TER, 
 +        SPLIT_ID_AND_TER,
 +        SPLIT_ID_ONLY,
 +        SPLIT_TER_ONLY,
 +        SPLIT_INTERACTIVE
 +    }
 +    splitting;
 +    
 +    splitting = SPLIT_TER_ONLY; /* keep compiler happy */
 +    
 +    /* Be a bit flexible to catch typos */
 +    if (!strncmp(chainsep,"id_o",4))
 +    {
 +        /* For later interactive splitting we tentatively assign new chain numbers at either changing id or ter records */
 +        splitting = SPLIT_ID_OR_TER;
 +        printf("Splitting chemical chains based on TER records or chain id changing.\n");
 +    }
 +    else if (!strncmp(chainsep,"int",3))
 +    {
 +        /* For later interactive splitting we tentatively assign new chain numbers at either changing id or ter records */
 +        splitting = SPLIT_INTERACTIVE;
 +        printf("Splitting chemical chains interactively.\n");
 +    }
 +    else if (!strncmp(chainsep,"id_a",4))
 +    {
 +        splitting = SPLIT_ID_AND_TER;
 +        printf("Splitting chemical chains based on TER records and chain id changing.\n");
 +    }
 +    else if (strlen(chainsep)==2 && !strncmp(chainsep,"id",4))
 +    {
 +        splitting = SPLIT_ID_ONLY;
 +        printf("Splitting chemical chains based on changing chain id only (ignoring TER records).\n");
 +    }
 +    else if (chainsep[0]=='t')
 +    {
 +        splitting = SPLIT_TER_ONLY;
 +        printf("Splitting chemical chains based on TER records only (ignoring chain id).\n");
 +    }
 +    else
 +    {
 +        gmx_fatal(FARGS,"Unidentified setting for chain separation: %s\n",chainsep);
 +    }                                                                           
 +                                                                                   
 +    /* The default chain enumeration is based on TER records only, which is reflected in chainnum below */
 +    
 +    old_prev_chainid  = '?';
 +    old_prev_chainnum = -1;
 +    new_chainnum  = -1;
 +    
 +    this_atomname       = NULL;
 +    this_atomnum        = -1;
 +    this_resname        = NULL;
 +    this_resnum         = -1;
 +    this_chainid        = '?';
 +    this_chainnumber    = -1;
 +
 +    for(i=0;i<pdba->nres;i++)
 +    {
 +        ri = &pdba->resinfo[i];
 +        old_this_chainid   = ri->chainid;
 +        old_this_chainnum  = ri->chainnum;
 +
 +        prev_atomname      = this_atomname;
 +        prev_atomnum       = this_atomnum;
 +        prev_resname       = this_resname;
 +        prev_resnum        = this_resnum;
 +        prev_chainid       = this_chainid;
 +        prev_chainnumber   = this_chainnumber;
 +
 +        this_atomname      = *(pdba->atomname[i]);
 +        this_atomnum       = (pdba->pdbinfo != NULL) ? pdba->pdbinfo[i].atomnr : i+1;
 +        this_resname       = *ri->name;
 +        this_resnum        = ri->nr;
 +        this_chainid       = ri->chainid;
 +        this_chainnumber   = ri->chainnum;
 +
 +        switch (splitting)
 +        {
 +            case SPLIT_ID_OR_TER:
 +                if(old_this_chainid != old_prev_chainid || old_this_chainnum != old_prev_chainnum)
 +                {
 +                    new_chainnum++;
 +                }
 +                break;
 +                
 +            case SPLIT_ID_AND_TER:
 +                if(old_this_chainid != old_prev_chainid && old_this_chainnum != old_prev_chainnum)
 +                {
 +                    new_chainnum++;
 +                }
 +                break;
 +                
 +            case SPLIT_ID_ONLY:
 +                if(old_this_chainid != old_prev_chainid)
 +                {
 +                    new_chainnum++;
 +                }
 +                break;
 +                
 +            case SPLIT_TER_ONLY:
 +                if(old_this_chainnum != old_prev_chainnum)
 +                {
 +                    new_chainnum++;
 +                }
 +                break;
 +            case SPLIT_INTERACTIVE:
 +                if(old_this_chainid != old_prev_chainid || old_this_chainnum != old_prev_chainnum)
 +                {
 +                    if(i>0)
 +                    {
 +                        printf("Split the chain (and introduce termini) between residue %s%d (chain id '%c', atom %d %s)\n" 
 +                               "and residue %s%d (chain id '%c', atom %d %s) ? [n/y]\n",
 +                               prev_resname,prev_resnum,prev_chainid,prev_atomnum,prev_atomname,
 +                               this_resname,this_resnum,this_chainid,this_atomnum,this_atomname);
 +                        
 +                        if(NULL==fgets(select,STRLEN-1,stdin))
 +                        {
 +                            gmx_fatal(FARGS,"Error reading from stdin");
 +                        }
 +                    }
 +                    if(i==0 || select[0] == 'y')
 +                    {
 +                        new_chainnum++;
 +                    }
 +                }               
 +                break;
 +            default:
 +                gmx_fatal(FARGS,"Internal inconsistency - this shouldn't happen...");
 +                break;
 +        }
 +        old_prev_chainid  = old_this_chainid;
 +        old_prev_chainnum = old_this_chainnum;
 +                                                                                   
 +        ri->chainnum = new_chainnum;        
 +    }
 +}
 +
 +
 +typedef struct {
 +  char chainid;
 +  char chainnum;
 +  int  start;
 +  int  natom;
 +  gmx_bool bAllWat;
 +  int  nterpairs;
 +  int  *chainstart;
 +} t_pdbchain;
 +
 +typedef struct {
 +  char chainid;
 +  int  chainnum;
 +  gmx_bool bAllWat;
 +  int nterpairs;
 +  int *chainstart;
 +  t_hackblock **ntdb;
 +  t_hackblock **ctdb;
 +  int *r_start;
 +  int *r_end;
 +  t_atoms *pdba;
 +  rvec *x;
 +} t_chain;
 +
 +int cmain(int argc, char *argv[])
 +{
 +  const char *desc[] = {
 +    "This program reads a [TT].pdb[tt] (or [TT].gro[tt]) file, reads",
 +    "some database files, adds hydrogens to the molecules and generates",
 +    "coordinates in GROMACS (GROMOS), or optionally [TT].pdb[tt], format",
 +    "and a topology in GROMACS format.",
 +    "These files can subsequently be processed to generate a run input file.",
 +    "[PAR]",
 +    "[TT]pdb2gmx[tt] will search for force fields by looking for",
 +    "a [TT]forcefield.itp[tt] file in subdirectories [TT]<forcefield>.ff[tt]",
 +    "of the current working directory and of the GROMACS library directory",
 +    "as inferred from the path of the binary or the [TT]GMXLIB[tt] environment",
 +    "variable.",
 +    "By default the forcefield selection is interactive,",
 +    "but you can use the [TT]-ff[tt] option to specify one of the short names",
 +    "in the list on the command line instead. In that case [TT]pdb2gmx[tt] just looks",
 +    "for the corresponding [TT]<forcefield>.ff[tt] directory.",
 +    "[PAR]",
 +    "After choosing a force field, all files will be read only from",
 +    "the corresponding force field directory.",
 +    "If you want to modify or add a residue types, you can copy the force",
 +    "field directory from the GROMACS library directory to your current",
 +    "working directory. If you want to add new protein residue types,",
 +    "you will need to modify [TT]residuetypes.dat[tt] in the library directory",
 +    "or copy the whole library directory to a local directory and set",
 +    "the environment variable [TT]GMXLIB[tt] to the name of that directory.",
 +    "Check Chapter 5 of the manual for more information about file formats.",
 +    "[PAR]",
 +    
 +    "Note that a [TT].pdb[tt] file is nothing more than a file format, and it",
 +    "need not necessarily contain a protein structure. Every kind of",
 +    "molecule for which there is support in the database can be converted.",
 +    "If there is no support in the database, you can add it yourself.[PAR]",
 +    
 +    "The program has limited intelligence, it reads a number of database",
 +    "files, that allow it to make special bonds (Cys-Cys, Heme-His, etc.),",
 +    "if necessary this can be done manually. The program can prompt the",
 +    "user to select which kind of LYS, ASP, GLU, CYS or HIS residue is",
 +    "desired. For Lys the choice is between neutral (two protons on NZ) or",
 +    "protonated (three protons, default), for Asp and Glu unprotonated",
 +    "(default) or protonated, for His the proton can be either on ND1,",
 +    "on NE2 or on both. By default these selections are done automatically.",
 +    "For His, this is based on an optimal hydrogen bonding",
 +    "conformation. Hydrogen bonds are defined based on a simple geometric",
 +    "criterion, specified by the maximum hydrogen-donor-acceptor angle",
 +    "and donor-acceptor distance, which are set by [TT]-angle[tt] and",
 +    "[TT]-dist[tt] respectively.[PAR]",
 +     
 +    "The protonation state of N- and C-termini can be chosen interactively",
 +    "with the [TT]-ter[tt] flag.  Default termini are ionized (NH3+ and COO-),",
 +    "respectively.  Some force fields support zwitterionic forms for chains of",
 +    "one residue, but for polypeptides these options should NOT be selected.",
 +    "The AMBER force fields have unique forms for the terminal residues,",
 +    "and these are incompatible with the [TT]-ter[tt] mechanism. You need",
 +    "to prefix your N- or C-terminal residue names with \"N\" or \"C\"",
 +    "respectively to use these forms, making sure you preserve the format",
 +    "of the coordinate file. Alternatively, use named terminating residues",
 +    "(e.g. ACE, NME).[PAR]",
 +
 +    "The separation of chains is not entirely trivial since the markup",
 +    "in user-generated PDB files frequently varies and sometimes it",
 +    "is desirable to merge entries across a TER record, for instance",
 +    "if you want a disulfide bridge or distance restraints between",
 +    "two protein chains or if you have a HEME group bound to a protein.",
 +    "In such cases multiple chains should be contained in a single",
 +    "[TT]moleculetype[tt] definition.",
 +    "To handle this, [TT]pdb2gmx[tt] uses two separate options.",
 +    "First, [TT]-chainsep[tt] allows you to choose when a new chemical chain should",
 +    "start, and termini added when applicable. This can be done based on the",
 +    "existence of TER records, when the chain id changes, or combinations of either",
 +    "or both of these. You can also do the selection fully interactively.",
 +    "In addition, there is a [TT]-merge[tt] option that controls how multiple chains",
 +    "are merged into one moleculetype, after adding all the chemical termini (or not).",
 +    "This can be turned off (no merging), all non-water chains can be merged into a",
 +    "single molecule, or the selection can be done interactively.[PAR]",
 +      
 +    "[TT]pdb2gmx[tt] will also check the occupancy field of the [TT].pdb[tt] file.",
 +    "If any of the occupancies are not one, indicating that the atom is",
 +    "not resolved well in the structure, a warning message is issued.",
 +    "When a [TT].pdb[tt] file does not originate from an X-ray structure determination",
 +    "all occupancy fields may be zero. Either way, it is up to the user",
 +    "to verify the correctness of the input data (read the article!).[PAR]", 
 +    
 +    "During processing the atoms will be reordered according to GROMACS",
 +    "conventions. With [TT]-n[tt] an index file can be generated that",
 +    "contains one group reordered in the same way. This allows you to",
 +    "convert a GROMOS trajectory and coordinate file to GROMOS. There is",
 +    "one limitation: reordering is done after the hydrogens are stripped",
 +    "from the input and before new hydrogens are added. This means that",
 +    "you should not use [TT]-ignh[tt].[PAR]",
 +
 +    "The [TT].gro[tt] and [TT].g96[tt] file formats do not support chain",
 +    "identifiers. Therefore it is useful to enter a [TT].pdb[tt] file name at",
 +    "the [TT]-o[tt] option when you want to convert a multi-chain [TT].pdb[tt] file.",
 +    "[PAR]",
 +    
 +    "The option [TT]-vsite[tt] removes hydrogen and fast improper dihedral",
 +    "motions. Angular and out-of-plane motions can be removed by changing",
 +    "hydrogens into virtual sites and fixing angles, which fixes their",
 +    "position relative to neighboring atoms. Additionally, all atoms in the",
 +    "aromatic rings of the standard amino acids (i.e. PHE, TRP, TYR and HIS)",
 +    "can be converted into virtual sites, eliminating the fast improper dihedral",
 +    "fluctuations in these rings. [BB]Note[bb] that in this case all other hydrogen",
 +    "atoms are also converted to virtual sites. The mass of all atoms that are",
 +    "converted into virtual sites, is added to the heavy atoms.[PAR]",
 +    "Also slowing down of dihedral motion can be done with [TT]-heavyh[tt]",
 +    "done by increasing the hydrogen-mass by a factor of 4. This is also",
 +    "done for water hydrogens to slow down the rotational motion of water.",
 +    "The increase in mass of the hydrogens is subtracted from the bonded",
 +    "(heavy) atom so that the total mass of the system remains the same."
 +  };
 +
 +  
 +  FILE       *fp,*top_file,*top_file2,*itp_file=NULL;
 +  int        natom,nres;
 +  t_atoms    pdba_all,*pdba;
 +  t_atoms    *atoms;
 +  t_resinfo  *ri;
 +  t_blocka   *block;
 +  int        chain,nch,maxch,nwaterchain;
 +  t_pdbchain *pdb_ch;
 +  t_chain    *chains,*cc;
 +  char       select[STRLEN];
 +  int        nincl,nmol;
 +  char       **incls;
 +  t_mols     *mols;
 +  char       **gnames;
 +  int        ePBC;
 +  matrix     box;
 +  rvec       box_space;
 +  int        i,j,k,l,nrtp;
 +  int        *swap_index,si;
 +  t_restp    *restp;
 +  t_hackblock *ah;
 +  t_symtab   symtab;
 +  gpp_atomtype_t atype;
 +  gmx_residuetype_t rt;
 +  const char *top_fn;
 +  char       fn[256],itp_fn[STRLEN],posre_fn[STRLEN],buf_fn[STRLEN];
 +  char       molname[STRLEN],title[STRLEN],quote[STRLEN],generator[STRLEN];
 +  char       *c,forcefield[STRLEN],ffdir[STRLEN];
 +  char       ffname[STRLEN],suffix[STRLEN],buf[STRLEN];
 +  char       *watermodel;
 +  const char *watres;
 +  int        nrtpf;
 +  char       **rtpf;
 +  char       rtp[STRLEN];
 +  int        nrrn;
 +  char       **rrn;
 +  int        nrtprename,naa;
 +  rtprename_t *rtprename=NULL;
 +  int        nah,nNtdb,nCtdb,ntdblist;
 +  t_hackblock *ntdb,*ctdb,**tdblist;
 +  int        nssbonds;
 +  t_ssbond   *ssbonds;
 +  rvec       *pdbx,*x;
 +  gmx_bool       bVsites=FALSE,bWat,bPrevWat=FALSE,bITP,bVsiteAromatics=FALSE,bCheckMerge;
 +  real       mHmult=0;
 +  t_hackblock *hb_chain;
 +  t_restp    *restp_chain;
 +  output_env_t oenv;
 +  const char *p_restype;
 +  int        rc;
 +  int           this_atomnum;
 +  int           prev_atomnum;
 +  const char *  prev_atomname;
 +  const char *  this_atomname;
 +  const char *  prev_resname;
 +  const char *  this_resname;
 +  int           prev_resnum;
 +  int           this_resnum;
 +  char          prev_chainid;
 +  char          this_chainid;
 +  int           prev_chainnumber;
 +  int           this_chainnumber;
 +  int           nid_used;
 +  int           this_chainstart;
 +  int           prev_chainstart;
 +  gmx_bool      bMerged;
 +  int           nchainmerges;
 +    
 +  gmx_atomprop_t aps;
 +  
 +  t_filenm   fnm[] = { 
 +    { efSTX, "-f", "eiwit.pdb", ffREAD  },
 +    { efSTO, "-o", "conf",      ffWRITE },
 +    { efTOP, NULL, NULL,        ffWRITE },
 +    { efITP, "-i", "posre",     ffWRITE },
 +    { efNDX, "-n", "clean",     ffOPTWR },
 +    { efSTO, "-q", "clean.pdb", ffOPTWR }
 +  };
 +#define NFILE asize(fnm)
 + 
 +
 +  /* Command line arguments must be static */
 +  static gmx_bool bNewRTP=FALSE;
 +  static gmx_bool bInter=FALSE, bCysMan=FALSE; 
 +  static gmx_bool bLysMan=FALSE, bAspMan=FALSE, bGluMan=FALSE, bHisMan=FALSE;
 +  static gmx_bool bGlnMan=FALSE, bArgMan=FALSE;
 +  static gmx_bool bTerMan=FALSE, bUnA=FALSE, bHeavyH;
 +  static gmx_bool bSort=TRUE, bAllowMissing=FALSE, bRemoveH=FALSE;
 +  static gmx_bool bDeuterate=FALSE,bVerbose=FALSE,bChargeGroups=TRUE,bCmap=TRUE;
 +  static gmx_bool bRenumRes=FALSE,bRTPresname=FALSE;
 +  static real angle=135.0, distance=0.3,posre_fc=1000;
 +  static real long_bond_dist=0.25, short_bond_dist=0.05;
 +  static const char *vsitestr[] = { NULL, "none", "hydrogens", "aromatics", NULL };
 +  static const char *watstr[] = { NULL, "select", "none", "spc", "spce", "tip3p", "tip4p", "tip5p", NULL };
 +  static const char *chainsep[] = { NULL, "id_or_ter", "id_and_ter", "ter", "id", "interactive", NULL };
 +  static const char *merge[] = {NULL, "no", "all", "interactive", NULL };
 +  static const char *ff = "select";
 +
 +  t_pargs pa[] = {
 +    { "-newrtp", FALSE, etBOOL, {&bNewRTP},
 +      "HIDDENWrite the residue database in new format to [TT]new.rtp[tt]"},
 +    { "-lb",     FALSE, etREAL, {&long_bond_dist},
 +      "HIDDENLong bond warning distance" },
 +    { "-sb",     FALSE, etREAL, {&short_bond_dist},
 +      "HIDDENShort bond warning distance" },
 +    { "-chainsep", FALSE, etENUM, {chainsep},
 +      "Condition in PDB files when a new chain should be started (adding termini)" },
 +    { "-merge",  FALSE, etENUM, {&merge},
 +      "Merge multiple chains into a single [moleculetype]" },         
 +    { "-ff",     FALSE, etSTR,  {&ff},
 +      "Force field, interactive by default. Use [TT]-h[tt] for information." },
 +    { "-water",  FALSE, etENUM, {watstr},
 +      "Water model to use" },
 +    { "-inter",  FALSE, etBOOL, {&bInter},
 +      "Set the next 8 options to interactive"},
 +    { "-ss",     FALSE, etBOOL, {&bCysMan}, 
 +      "Interactive SS bridge selection" },
 +    { "-ter",    FALSE, etBOOL, {&bTerMan}, 
 +      "Interactive termini selection, instead of charged (default)" },
 +    { "-lys",    FALSE, etBOOL, {&bLysMan}, 
 +      "Interactive lysine selection, instead of charged" },
 +    { "-arg",    FALSE, etBOOL, {&bArgMan}, 
 +      "Interactive arginine selection, instead of charged" },
 +    { "-asp",    FALSE, etBOOL, {&bAspMan}, 
 +      "Interactive aspartic acid selection, instead of charged" },
 +    { "-glu",    FALSE, etBOOL, {&bGluMan}, 
 +      "Interactive glutamic acid selection, instead of charged" },
 +    { "-gln",    FALSE, etBOOL, {&bGlnMan}, 
 +      "Interactive glutamine selection, instead of neutral" },
 +    { "-his",    FALSE, etBOOL, {&bHisMan},
 +      "Interactive histidine selection, instead of checking H-bonds" },
 +    { "-angle",  FALSE, etREAL, {&angle}, 
 +      "Minimum hydrogen-donor-acceptor angle for a H-bond (degrees)" },
 +    { "-dist",   FALSE, etREAL, {&distance},
 +      "Maximum donor-acceptor distance for a H-bond (nm)" },
 +    { "-una",    FALSE, etBOOL, {&bUnA}, 
 +      "Select aromatic rings with united CH atoms on phenylalanine, "
 +      "tryptophane and tyrosine" },
 +    { "-sort",   FALSE, etBOOL, {&bSort}, 
 +      "HIDDENSort the residues according to database, turning this off is dangerous as charge groups might be broken in parts" },
 +    { "-ignh",   FALSE, etBOOL, {&bRemoveH}, 
 +      "Ignore hydrogen atoms that are in the coordinate file" },
 +    { "-missing",FALSE, etBOOL, {&bAllowMissing}, 
 +      "Continue when atoms are missing, dangerous" },
 +    { "-v",      FALSE, etBOOL, {&bVerbose}, 
 +      "Be slightly more verbose in messages" },
 +    { "-posrefc",FALSE, etREAL, {&posre_fc},
 +      "Force constant for position restraints" },
 +    { "-vsite",  FALSE, etENUM, {vsitestr}, 
 +      "Convert atoms to virtual sites" },
 +    { "-heavyh", FALSE, etBOOL, {&bHeavyH},
 +      "Make hydrogen atoms heavy" },
 +    { "-deuterate", FALSE, etBOOL, {&bDeuterate},
 +      "Change the mass of hydrogens to 2 amu" },
 +    { "-chargegrp", TRUE, etBOOL, {&bChargeGroups},
 +      "Use charge groups in the [TT].rtp[tt] file"  },
 +    { "-cmap", TRUE, etBOOL, {&bCmap},
 +      "Use cmap torsions (if enabled in the [TT].rtp[tt] file)"  },
 +    { "-renum", TRUE, etBOOL, {&bRenumRes},
 +      "Renumber the residues consecutively in the output"  },
 +    { "-rtpres", TRUE, etBOOL, {&bRTPresname},
 +      "Use [TT].rtp[tt] entry names as residue names"  }
 +  };
 +#define NPARGS asize(pa)
 +  
 +  CopyRight(stderr,argv[0]);
 +  parse_common_args(&argc,argv,0,NFILE,fnm,asize(pa),pa,asize(desc),desc,
 +                  0,NULL,&oenv);
 +
 +  /* Force field selection, interactive or direct */
 +  choose_ff(strcmp(ff,"select") == 0 ? NULL : ff,
 +          forcefield,sizeof(forcefield),
 +          ffdir,sizeof(ffdir));
 +
 +  if (strlen(forcefield) > 0) {
 +    strcpy(ffname,forcefield);
 +    ffname[0] = toupper(ffname[0]);
 +  } else {
 +    gmx_fatal(FARGS,"Empty forcefield string");
 +  }
 +  
 +  printf("\nUsing the %s force field in directory %s\n\n",
 +       ffname,ffdir);
 +    
 +  choose_watermodel(watstr[0],ffdir,&watermodel);
 +
 +  if (bInter) {
 +    /* if anything changes here, also change description of -inter */
 +    bCysMan = TRUE;
 +    bTerMan = TRUE;
 +    bLysMan = TRUE;
 +    bArgMan = TRUE;
 +    bAspMan = TRUE;
 +    bGluMan = TRUE;
 +    bGlnMan = TRUE;
 +    bHisMan = TRUE;
 +  }
 +  
 +  if (bHeavyH)
 +    mHmult=4.0;
 +  else if (bDeuterate)
 +    mHmult=2.0;
 +  else
 +    mHmult=1.0;
 +  
 +  switch(vsitestr[0][0]) {
 +  case 'n': /* none */
 +    bVsites=FALSE;
 +    bVsiteAromatics=FALSE;
 +    break;
 +  case 'h': /* hydrogens */
 +    bVsites=TRUE;
 +    bVsiteAromatics=FALSE;
 +    break;
 +  case 'a': /* aromatics */
 +    bVsites=TRUE;
 +    bVsiteAromatics=TRUE;
 +    break;
 +  default:
 +    gmx_fatal(FARGS,"DEATH HORROR in $s (%d): vsitestr[0]='%s'",
 +              __FILE__,__LINE__,vsitestr[0]);
 +  }/* end switch */
 +  
 +  /* Open the symbol table */
 +  open_symtab(&symtab);
 +
 +  /* Residue type database */  
 +  gmx_residuetype_init(&rt);
 +  
 +  /* Read residue renaming database(s), if present */
 +  nrrn = fflib_search_file_end(ffdir,".r2b",FALSE,&rrn);
 +    
 +  nrtprename = 0;
 +  rtprename  = NULL;
 +  for(i=0; i<nrrn; i++) {
 +    fp = fflib_open(rrn[i]);
 +    read_rtprename(rrn[i],fp,&nrtprename,&rtprename);
 +    ffclose(fp);
 +    sfree(rrn[i]);
 +  }
 +  sfree(rrn);
 +
 +  /* Add all alternative names from the residue renaming database to the list of recognized amino/nucleic acids. */
 +  naa=0;
 +  for(i=0;i<nrtprename;i++)
 +  {
 +      rc=gmx_residuetype_get_type(rt,rtprename[i].gmx,&p_restype);
 +
 +      /* Only add names if the 'standard' gromacs/iupac base name was found */
 +      if(rc==0)
 +      {
 +          gmx_residuetype_add(rt,rtprename[i].main,p_restype);
 +          gmx_residuetype_add(rt,rtprename[i].nter,p_restype);
 +          gmx_residuetype_add(rt,rtprename[i].cter,p_restype);
 +          gmx_residuetype_add(rt,rtprename[i].bter,p_restype);
 +      }          
 +  }
 +    
 +  clear_mat(box);
 +  if (watermodel != NULL && (strstr(watermodel,"4p") ||
 +                           strstr(watermodel,"4P"))) {
 +    watres = "HO4";
 +  } else if (watermodel != NULL && (strstr(watermodel,"5p") ||
 +                                  strstr(watermodel,"5P"))) {
 +    watres = "HO5";
 +  } else {
 +    watres = "HOH";
 +  }
 +    
 +  aps = gmx_atomprop_init();
 +  natom = read_pdball(opt2fn("-f",NFILE,fnm),opt2fn_null("-q",NFILE,fnm),title,
 +                    &pdba_all,&pdbx,&ePBC,box,bRemoveH,&symtab,rt,watres,
 +                    aps,bVerbose);
 +  
 +  if (natom==0)
 +    gmx_fatal(FARGS,"No atoms found in pdb file %s\n",opt2fn("-f",NFILE,fnm));
 +
 +  printf("Analyzing pdb file\n");
 +  nch=0;
 +  maxch=0;
 +  nwaterchain=0;
 +    
 +  modify_chain_numbers(&pdba_all,chainsep[0]);
 +
 +  nchainmerges        = 0;
 +    
 +  this_atomname       = NULL;
 +  this_atomnum        = -1;
 +  this_resname        = NULL;
 +  this_resnum         = -1;
 +  this_chainid        = '?';
 +  this_chainnumber    = -1;
 +  this_chainstart     = 0;
 +  /* Keep the compiler happy */
 +  prev_chainstart     = 0;
 +    
 +  pdb_ch=NULL;
 +
 +  bMerged = FALSE;
 +  for (i=0; (i<natom); i++) 
 +  {
 +      ri = &pdba_all.resinfo[pdba_all.atom[i].resind];
 +
 +      prev_atomname      = this_atomname;
 +      prev_atomnum       = this_atomnum;
 +      prev_resname       = this_resname;
 +      prev_resnum        = this_resnum;
 +      prev_chainid       = this_chainid;
 +      prev_chainnumber   = this_chainnumber;
 +      if (!bMerged)
 +      {
 +          prev_chainstart    = this_chainstart;
 +      }
 +      
 +      this_atomname      = *pdba_all.atomname[i];
 +      this_atomnum       = (pdba_all.pdbinfo != NULL) ? pdba_all.pdbinfo[i].atomnr : i+1;
 +      this_resname       = *ri->name;
 +      this_resnum        = ri->nr;
 +      this_chainid       = ri->chainid;
 +      this_chainnumber   = ri->chainnum;
 +      
 +      bWat = gmx_strcasecmp(*ri->name,watres) == 0;
 +      if ((i == 0) || (this_chainnumber != prev_chainnumber) || (bWat != bPrevWat)) 
 +      {
 +          this_chainstart = pdba_all.atom[i].resind;
 +          
 +          bMerged = FALSE;
 +          if (i>0 && !bWat) 
 +          {
 +              if(!strncmp(merge[0],"int",3))
 +              {
 +                  printf("Merge chain ending with residue %s%d (chain id '%c', atom %d %s) and chain starting with\n"
 +                         "residue %s%d (chain id '%c', atom %d %s) into a single moleculetype (keeping termini)? [n/y]\n",
 +                         prev_resname,prev_resnum,prev_chainid,prev_atomnum,prev_atomname,
 +                         this_resname,this_resnum,this_chainid,this_atomnum,this_atomname);
 +                  
 +                  if(NULL==fgets(select,STRLEN-1,stdin))
 +                  {
 +                      gmx_fatal(FARGS,"Error reading from stdin");
 +                  }
 +                  bMerged = (select[0] == 'y');
 +              }
 +              else if(!strncmp(merge[0],"all",3))
 +              {
 +                  bMerged = TRUE;
 +              }
 +          }
 +          
 +          if (bMerged)
 +          { 
 +              pdb_ch[nch-1].chainstart[pdb_ch[nch-1].nterpairs] = 
 +              pdba_all.atom[i].resind - prev_chainstart;
 +              pdb_ch[nch-1].nterpairs++;
 +              srenew(pdb_ch[nch-1].chainstart,pdb_ch[nch-1].nterpairs+1);
 +              nchainmerges++;
 +          }
 +          else 
 +          {
 +              /* set natom for previous chain */
 +              if (nch > 0)
 +              {
 +                  pdb_ch[nch-1].natom=i-pdb_ch[nch-1].start;
 +              }
 +              if (bWat)
 +              {
 +                  nwaterchain++;
 +                  ri->chainid = ' ';
 +              }
 +              /* check if chain identifier was used before */
 +              for (j=0; (j<nch); j++) 
 +              {
 +                  if (pdb_ch[j].chainid != ' ' && pdb_ch[j].chainid == ri->chainid) 
 +                  {
 +                      printf("WARNING: Chain identifier '%c' is used in two non-sequential blocks.\n"
 +                             "They will be treated as separate chains unless you reorder your file.\n",
 +                             ri->chainid);
 +                  }
 +              }
 +              if (nch == maxch)
 +              {
 +                  maxch += 16;
 +                  srenew(pdb_ch,maxch);
 +              }
 +              pdb_ch[nch].chainid = ri->chainid;
 +              pdb_ch[nch].chainnum = ri->chainnum; 
 +              pdb_ch[nch].start=i;
 +              pdb_ch[nch].bAllWat=bWat;
 +              if (bWat)
 +                  pdb_ch[nch].nterpairs=0;
 +              else
 +                  pdb_ch[nch].nterpairs=1;
 +              snew(pdb_ch[nch].chainstart,pdb_ch[nch].nterpairs+1);
 +              /* modified [nch] to [0] below */
 +              pdb_ch[nch].chainstart[0]=0;
 +              nch++;
 +          }
 +      }
 +      bPrevWat=bWat;
 +  }
 +  pdb_ch[nch-1].natom=natom-pdb_ch[nch-1].start;
 +  
 +  /* set all the water blocks at the end of the chain */
 +  snew(swap_index,nch);
 +  j=0;
 +  for(i=0; i<nch; i++)
 +    if (!pdb_ch[i].bAllWat) {
 +      swap_index[j]=i;
 +      j++;
 +    }
 +  for(i=0; i<nch; i++)
 +    if (pdb_ch[i].bAllWat) {
 +      swap_index[j]=i;
 +      j++;
 +    }
 +  if (nwaterchain>1)
 +    printf("Moved all the water blocks to the end\n");
 +
 +  snew(chains,nch);
 +  /* copy pdb data and x for all chains */
 +  for (i=0; (i<nch); i++) {
 +    si=swap_index[i];
 +    chains[i].chainid = pdb_ch[si].chainid;
 +    chains[i].chainnum = pdb_ch[si].chainnum;
 +    chains[i].bAllWat = pdb_ch[si].bAllWat;
 +    chains[i].nterpairs = pdb_ch[si].nterpairs;
 +    chains[i].chainstart = pdb_ch[si].chainstart;
 +    snew(chains[i].ntdb,pdb_ch[si].nterpairs);
 +    snew(chains[i].ctdb,pdb_ch[si].nterpairs);
 +    snew(chains[i].r_start,pdb_ch[si].nterpairs);
 +    snew(chains[i].r_end,pdb_ch[si].nterpairs);
 +      
 +    snew(chains[i].pdba,1);
 +    init_t_atoms(chains[i].pdba,pdb_ch[si].natom,TRUE);
 +    snew(chains[i].x,chains[i].pdba->nr);
 +    for (j=0; j<chains[i].pdba->nr; j++) {
 +      chains[i].pdba->atom[j] = pdba_all.atom[pdb_ch[si].start+j];
 +      snew(chains[i].pdba->atomname[j],1);
 +      *chains[i].pdba->atomname[j] = 
 +      strdup(*pdba_all.atomname[pdb_ch[si].start+j]);
 +      chains[i].pdba->pdbinfo[j] = pdba_all.pdbinfo[pdb_ch[si].start+j];
 +      copy_rvec(pdbx[pdb_ch[si].start+j],chains[i].x[j]);
 +    }
 +    /* Re-index the residues assuming that the indices are continuous */
 +    k    = chains[i].pdba->atom[0].resind;
 +    nres = chains[i].pdba->atom[chains[i].pdba->nr-1].resind - k + 1;
 +    chains[i].pdba->nres = nres;
 +    for(j=0; j < chains[i].pdba->nr; j++) {
 +      chains[i].pdba->atom[j].resind -= k;
 +    }
 +    srenew(chains[i].pdba->resinfo,nres);
 +    for(j=0; j<nres; j++) {
 +      chains[i].pdba->resinfo[j] = pdba_all.resinfo[k+j];
 +      snew(chains[i].pdba->resinfo[j].name,1);
 +      *chains[i].pdba->resinfo[j].name = strdup(*pdba_all.resinfo[k+j].name);
 +      /* make all chain identifiers equal to that of the chain */
 +      chains[i].pdba->resinfo[j].chainid = pdb_ch[si].chainid;
 +    }
 +  }
 +
 +  if (nchainmerges>0)
 +    printf("\nMerged chains into joint molecule definitions at %d places.\n\n",
 +           nchainmerges);
 +
 +  printf("There are %d chains and %d blocks of water and "
 +       "%d residues with %d atoms\n",
 +       nch-nwaterchain,nwaterchain,
 +       pdba_all.resinfo[pdba_all.atom[natom-1].resind].nr,natom);
 +        
 +  printf("\n  %5s  %4s %6s\n","chain","#res","#atoms");
 +  for (i=0; (i<nch); i++)
 +    printf("  %d '%c' %5d %6d  %s\n",
 +         i+1, chains[i].chainid ? chains[i].chainid:'-',
 +         chains[i].pdba->nres, chains[i].pdba->nr,
 +         chains[i].bAllWat ? "(only water)":"");
 +  printf("\n");
 +  
 +  check_occupancy(&pdba_all,opt2fn("-f",NFILE,fnm),bVerbose);
 +  
 +  /* Read atomtypes... */
 +  atype = read_atype(ffdir,&symtab);
 +  
 +  /* read residue database */
 +  printf("Reading residue database... (%s)\n",forcefield);
 +  nrtpf = fflib_search_file_end(ffdir,".rtp",TRUE,&rtpf);
 +  nrtp  = 0;
 +  restp = NULL;
 +  for(i=0; i<nrtpf; i++) {
 +    read_resall(rtpf[i],&nrtp,&restp,atype,&symtab,FALSE);
 +    sfree(rtpf[i]);
 +  }
 +  sfree(rtpf);
 +  if (bNewRTP) {
 +    /* Not correct with multiple rtp input files with different bonded types */
 +    fp=gmx_fio_fopen("new.rtp","w");
 +    print_resall(fp,nrtp,restp,atype);
 +    gmx_fio_fclose(fp);
 +  }
 +    
 +  /* read hydrogen database */
 +  nah = read_h_db(ffdir,&ah);
 +  
 +  /* Read Termini database... */
 +  nNtdb=read_ter_db(ffdir,'n',&ntdb,atype);
 +  nCtdb=read_ter_db(ffdir,'c',&ctdb,atype);
 +  
 +  top_fn=ftp2fn(efTOP,NFILE,fnm);
 +  top_file=gmx_fio_fopen(top_fn,"w");
 +
 +  sprintf(generator,"%s - %s",ShortProgram(), GromacsVersion() );
 +
 +  print_top_header(top_file,top_fn,generator,FALSE,ffdir,mHmult);
 +
 +  nincl=0;
 +  nmol=0;
 +  incls=NULL;
 +  mols=NULL;
 +  nres=0;
 +  for(chain=0; (chain<nch); chain++) {
 +    cc = &(chains[chain]);
 +
 +    /* set pdba, natom and nres to the current chain */
 +    pdba =cc->pdba;
 +    x    =cc->x;
 +    natom=cc->pdba->nr;
 +    nres =cc->pdba->nres;
 +    
 +    if (cc->chainid && ( cc->chainid != ' ' ) )
 +      printf("Processing chain %d '%c' (%d atoms, %d residues)\n",
 +            chain+1,cc->chainid,natom,nres);
 +    else
 +      printf("Processing chain %d (%d atoms, %d residues)\n",
 +            chain+1,natom,nres);
 +      
 +    process_chain(pdba,x,bUnA,bUnA,bUnA,bLysMan,bAspMan,bGluMan,
 +                bHisMan,bArgMan,bGlnMan,angle,distance,&symtab,
 +                nrtprename,rtprename);
 +      
 +        cc->chainstart[cc->nterpairs] = pdba->nres;
 +        j = 0;
 +        for(i=0; i<cc->nterpairs; i++)
 +        {
 +            find_nc_ter(pdba,cc->chainstart[i],cc->chainstart[i+1],
 +                        &(cc->r_start[j]),&(cc->r_end[j]),rt);    
 +      
 +            if (cc->r_start[j] >= 0 && cc->r_end[j] >= 0)
 +            {
 +                j++;
 +            }
 +        }
 +        cc->nterpairs = j;
 +        if (cc->nterpairs == 0)
 +        {
 +            printf("Problem with chain definition, or missing terminal residues.\n"
 +                   "This chain does not appear to contain a recognized chain molecule.\n"
 +                   "If this is incorrect, you can edit residuetypes.dat to modify the behavior.\n");
 +        }
 +
 +    /* Check for disulfides and other special bonds */
 +    nssbonds = mk_specbonds(pdba,x,bCysMan,&ssbonds,bVerbose);
 +
 +    if (nrtprename > 0) {        
 +      rename_resrtp(pdba,cc->nterpairs,cc->r_start,cc->r_end,nrtprename,rtprename,
 +                  &symtab,bVerbose);
 +    }
 +    
 +    if (debug) {
 +      if (nch==1) {
 +      sprintf(fn,"chain.pdb");
 +      } else {
 +      sprintf(fn,"chain_%c%d.pdb",cc->chainid,cc->chainnum);
 +      }
 +      write_sto_conf(fn,title,pdba,x,NULL,ePBC,box);
 +    }
 +
 +      
 +    for(i=0; i<cc->nterpairs; i++) 
 +    {
 +        
 +        /* Set termini.
 +         * We first apply a filter so we only have the
 +         * termini that can be applied to the residue in question
 +         * (or a generic terminus if no-residue specific is available).
 +         */
 +        /* First the N terminus */
 +        if (nNtdb > 0) 
 +        {
 +            tdblist = filter_ter(nrtp,restp,nNtdb,ntdb,
 +                                 *pdba->resinfo[cc->r_start[i]].name,
 +                                 *pdba->resinfo[cc->r_start[i]].rtp,
 +                                 &ntdblist);
 +            if(ntdblist==0)
 +            {
 +                printf("No suitable end (N or 5') terminus found in database - assuming this residue\n"
 +                       "is already in a terminus-specific form and skipping terminus selection.\n");
 +                cc->ntdb[i]=NULL;
 +            }
 +            else 
 +            {
 +                if(bTerMan && ntdblist>1)
 +                {
 +                    sprintf(select,"Select start terminus type for %s-%d",
 +                            *pdba->resinfo[cc->r_start[i]].name,
 +                            pdba->resinfo[cc->r_start[i]].nr);
 +                    cc->ntdb[i] = choose_ter(ntdblist,tdblist,select);
 +                }
 +                else
 +                {
 +                    cc->ntdb[i] = tdblist[0];
 +                }
 +                
 +                printf("Start terminus %s-%d: %s\n",
 +                       *pdba->resinfo[cc->r_start[i]].name,
 +                       pdba->resinfo[cc->r_start[i]].nr,
 +                       (cc->ntdb[i])->name);
 +                sfree(tdblist);
 +            }
 +        }
 +        else 
 +        {
 +            cc->ntdb[i] = NULL;
 +        }
 +        
 +        /* And the C terminus */
 +        if (nCtdb > 0)
 +        {
 +            tdblist = filter_ter(nrtp,restp,nCtdb,ctdb,
 +                                 *pdba->resinfo[cc->r_end[i]].name,
 +                                 *pdba->resinfo[cc->r_end[i]].rtp,
 +                                 &ntdblist);
 +            if(ntdblist==0)
 +            {
 +                printf("No suitable end (C or 3') terminus found in database - assuming this residue\n"
 +                       "is already in a terminus-specific form and skipping terminus selection.\n");
 +                cc->ctdb[i] = NULL;
 +            }
 +            else 
 +            {
 +                if(bTerMan && ntdblist>1)
 +                {
 +                    sprintf(select,"Select end terminus type for %s-%d",
 +                            *pdba->resinfo[cc->r_end[i]].name,
 +                            pdba->resinfo[cc->r_end[i]].nr);
 +                    cc->ctdb[i] = choose_ter(ntdblist,tdblist,select);
 +                }
 +                else
 +                {
 +                    cc->ctdb[i] = tdblist[0];
 +                }
 +                printf("End terminus %s-%d: %s\n",
 +                       *pdba->resinfo[cc->r_end[i]].name,
 +                       pdba->resinfo[cc->r_end[i]].nr,
 +                       (cc->ctdb[i])->name);
 +                sfree(tdblist);
 +            }
 +        }
 +        else 
 +        {
 +            cc->ctdb[i] = NULL;
 +        }
 +    }
 +    /* lookup hackblocks and rtp for all residues */
 +    get_hackblocks_rtp(&hb_chain, &restp_chain,
 +                     nrtp, restp, pdba->nres, pdba->resinfo, 
 +                     cc->nterpairs, cc->ntdb, cc->ctdb, cc->r_start, cc->r_end);
 +    /* ideally, now we would not need the rtp itself anymore, but do 
 +     everything using the hb and restp arrays. Unfortunately, that 
 +     requires some re-thinking of code in gen_vsite.c, which I won't 
 +     do now :( AF 26-7-99 */
 +
 +    rename_atoms(NULL,ffdir,
 +               pdba,&symtab,restp_chain,FALSE,rt,FALSE,bVerbose);
 +
 +    match_atomnames_with_rtp(restp_chain,hb_chain,pdba,x,bVerbose);
 +
 +    if (bSort) {
 +      block = new_blocka();
 +      snew(gnames,1);
 +      sort_pdbatoms(pdba->nres,restp_chain,hb_chain,
 +                  natom,&pdba,&x,block,&gnames);
 +      natom = remove_duplicate_atoms(pdba,x,bVerbose);
 +      if (ftp2bSet(efNDX,NFILE,fnm)) {
 +      if (bRemoveH) {
 +        fprintf(stderr,"WARNING: with the -remh option the generated "
 +                "index file (%s) might be useless\n"
 +                "(the index file is generated before hydrogens are added)",
 +                ftp2fn(efNDX,NFILE,fnm));
 +      }
 +      write_index(ftp2fn(efNDX,NFILE,fnm),block,gnames);
 +      }
 +      for(i=0; i < block->nr; i++)
 +      sfree(gnames[i]);
 +      sfree(gnames);
 +      done_blocka(block);
 +    } else {
 +      fprintf(stderr,"WARNING: "
 +            "without sorting no check for duplicate atoms can be done\n");
 +    }
 +
 +    /* Generate Hydrogen atoms (and termini) in the sequence */
 +    printf("Generating any missing hydrogen atoms and/or adding termini.\n");
 +    natom=add_h(&pdba,&x,nah,ah,
 +              cc->nterpairs,cc->ntdb,cc->ctdb,cc->r_start,cc->r_end,bAllowMissing,
 +              NULL,NULL,TRUE,FALSE);
 +    printf("Now there are %d residues with %d atoms\n",
 +         pdba->nres,pdba->nr);
 +    if (debug) write_pdbfile(debug,title,pdba,x,ePBC,box,' ',0,NULL,TRUE);
 +
 +    if (debug)
 +      for(i=0; (i<natom); i++)
 +      fprintf(debug,"Res %s%d atom %d %s\n",
 +              *(pdba->resinfo[pdba->atom[i].resind].name),
 +              pdba->resinfo[pdba->atom[i].resind].nr,i+1,*pdba->atomname[i]);
 +    
 +    strcpy(posre_fn,ftp2fn(efITP,NFILE,fnm));
 +    
 +    /* make up molecule name(s) */
 +
 +      k = (cc->nterpairs>0 && cc->r_start[0]>=0) ? cc->r_start[0] : 0;
 +            
 +    gmx_residuetype_get_type(rt,*pdba->resinfo[k].name,&p_restype);
 +      
 +    suffix[0]='\0';
 +      
 +    if (cc->bAllWat) 
 +    {
 +        sprintf(molname,"Water");
 +    } 
 +    else
 +    {
 +        this_chainid = cc->chainid;
 +        
 +        /* Add the chain id if we have one */
 +        if(this_chainid != ' ')
 +        {
 +            sprintf(buf,"_chain_%c",this_chainid);
 +            strcat(suffix,buf);
 +        }
 +
 +        /* Check if there have been previous chains with the same id */
 +        nid_used = 0;
 +        for(k=0;k<chain;k++)
 +        {
 +            if(cc->chainid == chains[k].chainid)
 +            {
 +                nid_used++;
 +            }
 +        }
 +        /* Add the number for this chain identifier if there are multiple copies */
 +        if(nid_used>0)
 +        {
 +            
 +            sprintf(buf,"%d",nid_used+1);
 +            strcat(suffix,buf);
 +        }
 +
 +        if(strlen(suffix)>0)
 +        {
 +            sprintf(molname,"%s%s",p_restype,suffix);
 +        }
 +        else
 +        {
 +            strcpy(molname,p_restype);
 +        }
 +    }
 +      
 +    if ((nch-nwaterchain>1) && !cc->bAllWat) {
 +      bITP=TRUE;
 +      strcpy(itp_fn,top_fn);
 +      printf("Chain time...\n");
 +      c=strrchr(itp_fn,'.');
 +      sprintf(c,"_%s.itp",molname);
 +      c=strrchr(posre_fn,'.');
 +      sprintf(c,"_%s.itp",molname);
 +      if (strcmp(itp_fn,posre_fn) == 0) {
 +      strcpy(buf_fn,posre_fn);
 +      c  = strrchr(buf_fn,'.');
 +      *c = '\0';
 +      sprintf(posre_fn,"%s_pr.itp",buf_fn);
 +      }
 +      
 +      nincl++;
 +      srenew(incls,nincl);
 +      incls[nincl-1]=strdup(itp_fn);
 +      itp_file=gmx_fio_fopen(itp_fn,"w");
 +    } else
 +      bITP=FALSE;
 +
 +    srenew(mols,nmol+1);
 +    if (cc->bAllWat) {
 +      mols[nmol].name = strdup("SOL");
 +      mols[nmol].nr   = pdba->nres;
 +    } else {
 +      mols[nmol].name = strdup(molname);
 +      mols[nmol].nr   = 1;
 +    }
 +    nmol++;
 +
 +    if (bITP)
 +      print_top_comment(itp_file,itp_fn,generator,ffdir,TRUE);
 +
 +    if (cc->bAllWat)
 +      top_file2=NULL;
 +    else
 +      if (bITP)
 +      top_file2=itp_file;
 +      else
 +      top_file2=top_file;
 +
 +    pdb2top(top_file2,posre_fn,molname,pdba,&x,atype,&symtab,
 +          nrtp,restp,
 +          restp_chain,hb_chain,
 +          cc->nterpairs,cc->ntdb,cc->ctdb,bAllowMissing,
 +          bVsites,bVsiteAromatics,forcefield,ffdir,
 +          mHmult,nssbonds,ssbonds,
 +          long_bond_dist,short_bond_dist,bDeuterate,bChargeGroups,bCmap,
 +          bRenumRes,bRTPresname);
 +    
 +    if (!cc->bAllWat)
 +      write_posres(posre_fn,pdba,posre_fc);
 +
 +    if (bITP)
 +      gmx_fio_fclose(itp_file);
 +
 +    /* pdba and natom have been reassigned somewhere so: */
 +    cc->pdba = pdba;
 +    cc->x = x;
 +    
 +    if (debug) {
 +      if (cc->chainid == ' ')
 +      sprintf(fn,"chain.pdb");
 +      else
 +      sprintf(fn,"chain_%c.pdb",cc->chainid);
 +      cool_quote(quote,255,NULL);
 +      write_sto_conf(fn,quote,pdba,x,NULL,ePBC,box);
 +    }
 +  }
 +
 +  if (watermodel == NULL) {
 +    for(chain=0; chain<nch; chain++) {
 +      if (chains[chain].bAllWat) {
 +      gmx_fatal(FARGS,"You have chosen not to include a water model, but there is water in the input file. Select a water model or remove the water from your input file.");
 +      }
 +    }
 +  } else {
 +    sprintf(buf_fn,"%s%c%s.itp",ffdir,DIR_SEPARATOR,watermodel);
 +    if (!fflib_fexist(buf_fn)) {
 +      gmx_fatal(FARGS,"The topology file '%s' for the selected water model '%s' can not be found in the force field directory. Select a different water model.",
 +              buf_fn,watermodel);
 +    }
 +  }
 +
 +  print_top_mols(top_file,title,ffdir,watermodel,nincl,incls,nmol,mols);
 +  gmx_fio_fclose(top_file);
 +
 +  gmx_residuetype_destroy(rt);
 +    
 +  /* now merge all chains back together */
 +  natom=0;
 +  nres=0;
 +  for (i=0; (i<nch); i++) {
 +    natom+=chains[i].pdba->nr;
 +    nres+=chains[i].pdba->nres;
 +  }
 +  snew(atoms,1);
 +  init_t_atoms(atoms,natom,FALSE);
 +  for(i=0; i < atoms->nres; i++)
 +    sfree(atoms->resinfo[i].name);
 +  sfree(atoms->resinfo);
 +  atoms->nres=nres;
 +  snew(atoms->resinfo,nres);
 +  snew(x,natom);
 +  k=0;
 +  l=0;
 +  for (i=0; (i<nch); i++) {
 +    if (nch>1)
 +      printf("Including chain %d in system: %d atoms %d residues\n",
 +           i+1,chains[i].pdba->nr,chains[i].pdba->nres);
 +    for (j=0; (j<chains[i].pdba->nr); j++) {
 +      atoms->atom[k]=chains[i].pdba->atom[j];
 +      atoms->atom[k].resind += l; /* l is processed nr of residues */
 +      atoms->atomname[k]=chains[i].pdba->atomname[j];
 +      atoms->resinfo[atoms->atom[k].resind].chainid = chains[i].chainid;
 +      copy_rvec(chains[i].x[j],x[k]);
 +      k++;
 +    }
 +    for (j=0; (j<chains[i].pdba->nres); j++) {
 +      atoms->resinfo[l] = chains[i].pdba->resinfo[j];
 +      if (bRTPresname) {
 +      atoms->resinfo[l].name = atoms->resinfo[l].rtp;
 +      }
 +      l++;
 +    }
 +  }
 +  
 +  if (nch>1) {
 +    fprintf(stderr,"Now there are %d atoms and %d residues\n",k,l);
 +    print_sums(atoms, TRUE);
 +  }
 +  
 +  fprintf(stderr,"\nWriting coordinate file...\n");
 +  clear_rvec(box_space);
 +  if (box[0][0] == 0) 
 +    gen_box(0,atoms->nr,x,box,box_space,FALSE);
 +  write_sto_conf(ftp2fn(efSTO,NFILE,fnm),title,atoms,x,NULL,ePBC,box);
 +
 +  printf("\t\t--------- PLEASE NOTE ------------\n");
 +  printf("You have successfully generated a topology from: %s.\n",
 +       opt2fn("-f",NFILE,fnm));
 +  if (watermodel != NULL) {
 +    printf("The %s force field and the %s water model are used.\n",
 +         ffname,watermodel);
 +  } else {
 +    printf("The %s force field is used.\n",
 +         ffname);
 +  }
 +  printf("\t\t--------- ETON ESAELP ------------\n");
 +  
 +
 +  thanx(stdout);
 +  
 +  return 0;
 +}
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge